2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/etherdevice.h>
36 #include <linux/mlx4/cmd.h>
37 #include <linux/module.h>
38 #include <linux/cache.h>
44 MLX4_COMMAND_INTERFACE_MIN_REV = 2,
45 MLX4_COMMAND_INTERFACE_MAX_REV = 3,
46 MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3,
49 extern void __buggy_use_of_MLX4_GET(void);
50 extern void __buggy_use_of_MLX4_PUT(void);
52 static bool enable_qos;
53 module_param(enable_qos, bool, 0444);
54 MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
56 #define MLX4_GET(dest, source, offset) \
58 void *__p = (char *) (source) + (offset); \
59 switch (sizeof (dest)) { \
60 case 1: (dest) = *(u8 *) __p; break; \
61 case 2: (dest) = be16_to_cpup(__p); break; \
62 case 4: (dest) = be32_to_cpup(__p); break; \
63 case 8: (dest) = be64_to_cpup(__p); break; \
64 default: __buggy_use_of_MLX4_GET(); \
68 #define MLX4_PUT(dest, source, offset) \
70 void *__d = ((char *) (dest) + (offset)); \
71 switch (sizeof(source)) { \
72 case 1: *(u8 *) __d = (source); break; \
73 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
74 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
75 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
76 default: __buggy_use_of_MLX4_PUT(); \
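/*
 * Illustrative sketch (not part of the driver): MLX4_GET and MLX4_PUT give
 * offset-based, endianness-aware access to firmware mailbox buffers.  The
 * width of the local variable selects the byte swap, so a caller only names
 * a variable of the right size and the byte offset documented for the
 * command.  The offsets below are made up for the example.
 */
#if 0	/* example only */
static void example_mailbox_access(void *outbox, void *inbox)
{
	u8 byte_field;
	u32 dword_field;

	MLX4_GET(byte_field, outbox, 0x13);	/* 8-bit field at offset 0x13 */
	MLX4_GET(dword_field, outbox, 0x44);	/* 32-bit big-endian field */

	byte_field &= 0x1f;
	MLX4_PUT(inbox, byte_field, 0x13);	/* write back at the same offset */
}
#endif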
80 static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
82 static const char *fname[] = {
83 [ 0] = "RC transport",
84 [ 1] = "UC transport",
85 [ 2] = "UD transport",
86 [ 3] = "XRC transport",
87 [ 4] = "reliable multicast",
88 [ 5] = "FCoIB support",
90 [ 7] = "IPoIB checksum offload",
91 [ 8] = "P_Key violation counter",
92 [ 9] = "Q_Key violation counter",
94 [12] = "Dual Port Different Protocol (DPDP) support",
95 [15] = "Big LSO headers",
98 [18] = "Atomic ops support",
99 [19] = "Raw multicast support",
100 [20] = "Address vector port checking support",
101 [21] = "UD multicast support",
102 [24] = "Demand paging support",
103 [25] = "Router support",
104 [30] = "IBoE support",
105 [32] = "Unicast loopback support",
106 [34] = "FCS header control",
107 [38] = "Wake On LAN support",
108 [40] = "UDP RSS support",
109 [41] = "Unicast VEP steering support",
110 [42] = "Multicast VEP steering support",
111 [48] = "Counters support",
112 [53] = "Port ETS Scheduler support",
113 [55] = "Port link type sensing support",
114 [59] = "Port management change event support",
115 [61] = "64 byte EQE support",
116 [62] = "64 byte CQE support",
120 mlx4_dbg(dev, "DEV_CAP flags:\n");
121 for (i = 0; i < ARRAY_SIZE(fname); ++i)
122 if (fname[i] && (flags & (1LL << i)))
123 mlx4_dbg(dev, " %s\n", fname[i]);
126 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
128 static const char * const fname[] = {
130 [1] = "RSS Toeplitz Hash Function support",
131 [2] = "RSS XOR Hash Function support",
132 [3] = "Device managed flow steering support",
133 [4] = "Automatic MAC reassignment support",
134 [5] = "Time stamping support",
135 [6] = "VST (control vlan insertion/stripping) support",
136 [7] = "FSM (MAC anti-spoofing) support",
137 [8] = "Dynamic QP updates support",
138 [9] = "Device managed flow steering IPoIB support",
139 [10] = "TCP/IP offloads/flow-steering for VXLAN support"
143 for (i = 0; i < ARRAY_SIZE(fname); ++i)
144 if (fname[i] && (flags & (1LL << i)))
145 mlx4_dbg(dev, " %s\n", fname[i]);
148 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
150 struct mlx4_cmd_mailbox *mailbox;
154 #define MOD_STAT_CFG_IN_SIZE 0x100
156 #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002
157 #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003
159 mailbox = mlx4_alloc_cmd_mailbox(dev);
161 return PTR_ERR(mailbox);
162 inbox = mailbox->buf;
164 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
165 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
167 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
168 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
170 mlx4_free_cmd_mailbox(dev, mailbox);
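/*
 * Illustrative sketch: a typical caller (e.g. early device init) fills
 * struct mlx4_mod_stat_cfg and tells the firmware which page-size fields
 * are valid.  The values below are assumptions for the example, not the
 * actual init path.
 */
#if 0	/* example only */
static int example_set_fw_page_size(struct mlx4_dev *dev)
{
	struct mlx4_mod_stat_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.log_pg_sz_m = 1;	/* mark log_pg_sz as valid */
	cfg.log_pg_sz = 0;	/* log2 of the page size; 0 == default 4 KB */
	return mlx4_MOD_STAT_CFG(dev, &cfg);
}
#endif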
174 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
175 struct mlx4_vhcr *vhcr,
176 struct mlx4_cmd_mailbox *inbox,
177 struct mlx4_cmd_mailbox *outbox,
178 struct mlx4_cmd_info *cmd)
180 struct mlx4_priv *priv = mlx4_priv(dev);
182 u32 size, proxy_qp, qkey;
185 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
186 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
187 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
188 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8
189 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
190 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
191 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
192 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
193 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
194 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
195 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
196 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
198 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
199 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
200 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
201 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
202 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
203 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
205 #define QUERY_FUNC_CAP_FMR_FLAG 0x80
206 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40
207 #define QUERY_FUNC_CAP_FLAG_ETH 0x80
208 #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
210 /* when opcode modifier = 1 */
211 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
212 #define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4
213 #define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
214 #define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
216 #define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
217 #define QUERY_FUNC_CAP_QP0_PROXY 0x14
218 #define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
219 #define QUERY_FUNC_CAP_QP1_PROXY 0x1c
220 #define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28
222 #define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
223 #define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
224 #define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
225 #define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
227 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
229 if (vhcr->op_modifier == 1) {
230 struct mlx4_active_ports actv_ports =
231 mlx4_get_active_ports(dev, slave);
232 int converted_port = mlx4_slave_convert_port(
233 dev, slave, vhcr->in_modifier);
235 if (converted_port < 0)
238 vhcr->in_modifier = converted_port;
239 /* phys-port = logical-port */
240 field = vhcr->in_modifier -
241 find_first_bit(actv_ports.ports, dev->caps.num_ports);
242 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
244 port = vhcr->in_modifier;
245 proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
247 /* Set the nic_info bit to mark support for the new fields */
248 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
250 if (mlx4_vf_smi_enabled(dev, slave, port) &&
251 !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
252 field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
253 MLX4_PUT(outbox->buf, qkey,
254 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
256 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
258 /* size is now the QP number */
259 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
260 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
263 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
265 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
267 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);
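	/*
	 * Worked example (values are illustrative): with
	 * base_proxy_sqpn == 0x200, slave == 3 and port == 1, the proxy QP0
	 * written above is 0x200 + 8 * 3 + 1 - 1 == 0x218; the tunnel QP0
	 * comes from base_tunnel_sqpn by the same arithmetic.
	 */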
269 MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
270 QUERY_FUNC_CAP_PHYS_PORT_ID);
272 } else if (vhcr->op_modifier == 0) {
273 struct mlx4_active_ports actv_ports =
274 mlx4_get_active_ports(dev, slave);
275 /* enable rdma and ethernet interfaces, and new quota locations */
276 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
277 QUERY_FUNC_CAP_FLAG_QUOTAS);
278 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
281 bitmap_weight(actv_ports.ports, dev->caps.num_ports),
282 dev->caps.num_ports);
283 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
285 size = dev->caps.function_caps; /* set PF behaviours */
286 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
288 field = 0; /* protected FMR support not available as yet */
289 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
291 size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
292 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
293 size = dev->caps.num_qps;
294 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
296 size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
297 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
298 size = dev->caps.num_srqs;
299 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
301 size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
302 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
303 size = dev->caps.num_cqs;
304 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
306 size = dev->caps.num_eqs;
307 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
309 size = dev->caps.reserved_eqs;
310 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
312 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
313 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
314 size = dev->caps.num_mpts;
315 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
317 size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
318 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
319 size = dev->caps.num_mtts;
320 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
322 size = dev->caps.num_mgms + dev->caps.num_amgms;
323 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
324 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
332 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
333 struct mlx4_func_cap *func_cap)
335 struct mlx4_cmd_mailbox *mailbox;
337 u8 field, op_modifier;
339 int err = 0, quotas = 0;
341 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
343 mailbox = mlx4_alloc_cmd_mailbox(dev);
345 return PTR_ERR(mailbox);
347 err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
348 MLX4_CMD_QUERY_FUNC_CAP,
349 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
353 outbox = mailbox->buf;
356 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
357 if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
358 mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
359 err = -EPROTONOSUPPORT;
362 func_cap->flags = field;
363 quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
365 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
366 func_cap->num_ports = field;
368 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
369 func_cap->pf_context_behaviour = size;
372 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
373 func_cap->qp_quota = size & 0xFFFFFF;
375 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
376 func_cap->srq_quota = size & 0xFFFFFF;
378 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
379 func_cap->cq_quota = size & 0xFFFFFF;
381 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
382 func_cap->mpt_quota = size & 0xFFFFFF;
384 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
385 func_cap->mtt_quota = size & 0xFFFFFF;
387 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
388 func_cap->mcg_quota = size & 0xFFFFFF;
391 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
392 func_cap->qp_quota = size & 0xFFFFFF;
394 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
395 func_cap->srq_quota = size & 0xFFFFFF;
397 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
398 func_cap->cq_quota = size & 0xFFFFFF;
400 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
401 func_cap->mpt_quota = size & 0xFFFFFF;
403 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
404 func_cap->mtt_quota = size & 0xFFFFFF;
406 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
407 func_cap->mcg_quota = size & 0xFFFFFF;
409 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
410 func_cap->max_eq = size & 0xFFFFFF;
412 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
413 func_cap->reserved_eq = size & 0xFFFFFF;
418 /* logical port query */
419 if (gen_or_port > dev->caps.num_ports) {
424 MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
425 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
426 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
427 mlx4_err(dev, "VLAN is enforced on this port\n");
428 err = -EPROTONOSUPPORT;
432 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
433 mlx4_err(dev, "Force mac is enabled on this port\n");
434 err = -EPROTONOSUPPORT;
437 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
438 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
439 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
440 mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
441 err = -EPROTONOSUPPORT;
446 MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
447 func_cap->physical_port = field;
448 if (func_cap->physical_port != gen_or_port) {
453 if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
454 MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
455 func_cap->qp0_qkey = qkey;
457 func_cap->qp0_qkey = 0;
460 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
461 func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
463 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
464 func_cap->qp0_proxy_qpn = size & 0xFFFFFF;
466 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
467 func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;
469 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
470 func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
472 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
473 MLX4_GET(func_cap->phys_port_id, outbox,
474 QUERY_FUNC_CAP_PHYS_PORT_ID);
476 /* All other resources are allocated by the master, but we still report
477 * 'num' and 'reserved' capabilities as follows:
478 * - num remains the maximum resource index
479 * - 'num - reserved' is the total available objects of a resource, but
480 * resource indices may be less than 'reserved'
481 * TODO: set per-resource quotas */
484 mlx4_free_cmd_mailbox(dev, mailbox);
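/*
 * Illustrative sketch: a VF typically issues this query twice - once with
 * gen_or_port == 0 for the general function capabilities, then once per
 * port (1-based) for the per-port fields.  This shows the calling
 * convention only; it is not the actual slave init path.
 */
#if 0	/* example only */
static int example_query_vf_caps(struct mlx4_dev *dev)
{
	struct mlx4_func_cap func_cap;
	int p, err;

	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);	/* general caps */
	for (p = 1; !err && p <= func_cap.num_ports; ++p)
		err = mlx4_QUERY_FUNC_CAP(dev, p, &func_cap); /* per-port caps */
	return err;
}
#endif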
489 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
491 struct mlx4_cmd_mailbox *mailbox;
494 u32 field32, flags, ext_flags;
500 #define QUERY_DEV_CAP_OUT_SIZE 0x100
501 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
502 #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
503 #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
504 #define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
505 #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
506 #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
507 #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
508 #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
509 #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
510 #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
511 #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
512 #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
513 #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
514 #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
515 #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
516 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
517 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
518 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
519 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
520 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
521 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
522 #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
523 #define QUERY_DEV_CAP_RSS_OFFSET 0x2e
524 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
525 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
526 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
527 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
528 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
529 #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
530 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
531 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
532 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
533 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
534 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
535 #define QUERY_DEV_CAP_WOL_OFFSET 0x43
536 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
537 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
538 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
539 #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
540 #define QUERY_DEV_CAP_BF_OFFSET 0x4c
541 #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
542 #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
543 #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
544 #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
545 #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
546 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
547 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
548 #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
549 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
550 #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
551 #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
552 #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
553 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
554 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
555 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
556 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
557 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
558 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
559 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
560 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
561 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
562 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
563 #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
564 #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
565 #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
566 #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
567 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
568 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
569 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
570 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
571 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
572 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
573 #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
574 #define QUERY_DEV_CAP_VXLAN 0x9e
577 mailbox = mlx4_alloc_cmd_mailbox(dev);
579 return PTR_ERR(mailbox);
580 outbox = mailbox->buf;
582 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
583 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
587 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
588 dev_cap->reserved_qps = 1 << (field & 0xf);
589 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
590 dev_cap->max_qps = 1 << (field & 0x1f);
591 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
592 dev_cap->reserved_srqs = 1 << (field >> 4);
593 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
594 dev_cap->max_srqs = 1 << (field & 0x1f);
595 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
596 dev_cap->max_cq_sz = 1 << field;
597 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
598 dev_cap->reserved_cqs = 1 << (field & 0xf);
599 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
600 dev_cap->max_cqs = 1 << (field & 0x1f);
601 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
602 dev_cap->max_mpts = 1 << (field & 0x3f);
603 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
604 dev_cap->reserved_eqs = field & 0xf;
605 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
606 dev_cap->max_eqs = 1 << (field & 0xf);
607 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
608 dev_cap->reserved_mtts = 1 << (field >> 4);
609 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
610 dev_cap->max_mrw_sz = 1 << field;
611 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
612 dev_cap->reserved_mrws = 1 << (field & 0xf);
613 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
614 dev_cap->max_mtt_seg = 1 << (field & 0x3f);
615 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
616 dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
617 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
618 dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
619 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
622 dev_cap->max_gso_sz = 0;
624 dev_cap->max_gso_sz = 1 << field;
626 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
628 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
630 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
633 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
634 dev_cap->max_rss_tbl_sz = 1 << field;
636 dev_cap->max_rss_tbl_sz = 0;
637 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
638 dev_cap->max_rdma_global = 1 << (field & 0x3f);
639 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
640 dev_cap->local_ca_ack_delay = field & 0x1f;
641 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
642 dev_cap->num_ports = field & 0xf;
643 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
644 dev_cap->max_msg_sz = 1 << (field & 0x1f);
645 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
647 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
648 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
649 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
651 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
652 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
653 dev_cap->fs_max_num_qp_per_entry = field;
654 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
655 dev_cap->stat_rate_support = stat_rate;
656 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
658 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
659 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
660 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
661 dev_cap->flags = flags | (u64)ext_flags << 32;
662 MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
663 dev_cap->wol_port[1] = !!(field & 0x20);
664 dev_cap->wol_port[2] = !!(field & 0x40);
665 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
666 dev_cap->reserved_uars = field >> 4;
667 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
668 dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
669 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
670 dev_cap->min_page_sz = 1 << field;
672 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
674 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
675 dev_cap->bf_reg_size = 1 << (field & 0x1f);
676 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
677 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
679 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
680 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
681 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
683 dev_cap->bf_reg_size = 0;
684 mlx4_dbg(dev, "BlueFlame not available\n");
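	/*
	 * Worked example (illustrative numbers): the regs-per-page check
	 * above guards against the firmware advertising more BlueFlame
	 * registers than fit in one CPU page.  With bf_reg_size == 512 and
	 * PAGE_SIZE == 4096, at most 4096 / 512 == 8 registers (log value 3)
	 * can share a page.
	 */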
687 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
688 dev_cap->max_sq_sg = field;
689 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
690 dev_cap->max_sq_desc_sz = size;
692 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
693 dev_cap->max_qp_per_mcg = 1 << field;
694 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
695 dev_cap->reserved_mgms = field & 0xf;
696 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
697 dev_cap->max_mcgs = 1 << field;
698 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
699 dev_cap->reserved_pds = field >> 4;
700 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
701 dev_cap->max_pds = 1 << (field & 0x3f);
702 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
703 dev_cap->reserved_xrcds = field >> 4;
704 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
705 dev_cap->max_xrcds = 1 << (field & 0x1f);
707 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
708 dev_cap->rdmarc_entry_sz = size;
709 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
710 dev_cap->qpc_entry_sz = size;
711 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
712 dev_cap->aux_entry_sz = size;
713 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
714 dev_cap->altc_entry_sz = size;
715 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
716 dev_cap->eqc_entry_sz = size;
717 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
718 dev_cap->cqc_entry_sz = size;
719 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
720 dev_cap->srq_entry_sz = size;
721 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
722 dev_cap->cmpt_entry_sz = size;
723 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
724 dev_cap->mtt_entry_sz = size;
725 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
726 dev_cap->dmpt_entry_sz = size;
728 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
729 dev_cap->max_srq_sz = 1 << field;
730 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
731 dev_cap->max_qp_sz = 1 << field;
732 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
733 dev_cap->resize_srq = field & 1;
734 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
735 dev_cap->max_rq_sg = field;
736 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
737 dev_cap->max_rq_desc_sz = size;
739 MLX4_GET(dev_cap->bmme_flags, outbox,
740 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
741 MLX4_GET(dev_cap->reserved_lkey, outbox,
742 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
743 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
745 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
746 MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
748 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
749 MLX4_GET(dev_cap->max_icm_sz, outbox,
750 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
751 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
752 MLX4_GET(dev_cap->max_counters, outbox,
753 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
755 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
756 if (field32 & (1 << 16))
757 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
758 if (field32 & (1 << 26))
759 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
760 if (field32 & (1 << 20))
761 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
763 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
764 for (i = 1; i <= dev_cap->num_ports; ++i) {
765 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
766 dev_cap->max_vl[i] = field >> 4;
767 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
768 dev_cap->ib_mtu[i] = field >> 4;
769 dev_cap->max_port_width[i] = field & 0xf;
770 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
771 dev_cap->max_gids[i] = 1 << (field & 0xf);
772 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
773 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
776 #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
777 #define QUERY_PORT_MTU_OFFSET 0x01
778 #define QUERY_PORT_ETH_MTU_OFFSET 0x02
779 #define QUERY_PORT_WIDTH_OFFSET 0x06
780 #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
781 #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
782 #define QUERY_PORT_MAX_VL_OFFSET 0x0b
783 #define QUERY_PORT_MAC_OFFSET 0x10
784 #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
785 #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
786 #define QUERY_PORT_TRANS_CODE_OFFSET 0x20
788 for (i = 1; i <= dev_cap->num_ports; ++i) {
789 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
790 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
794 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
795 dev_cap->supported_port_types[i] = field & 3;
796 dev_cap->suggested_type[i] = (field >> 3) & 1;
797 dev_cap->default_sense[i] = (field >> 4) & 1;
798 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
799 dev_cap->ib_mtu[i] = field & 0xf;
800 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
801 dev_cap->max_port_width[i] = field & 0xf;
802 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
803 dev_cap->max_gids[i] = 1 << (field >> 4);
804 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
805 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
806 dev_cap->max_vl[i] = field & 0xf;
807 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
808 dev_cap->log_max_macs[i] = field & 0xf;
809 dev_cap->log_max_vlans[i] = field >> 4;
810 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
811 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
812 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
813 dev_cap->trans_type[i] = field32 >> 24;
814 dev_cap->vendor_oui[i] = field32 & 0xffffff;
815 MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
816 MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
820 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
821 dev_cap->bmme_flags, dev_cap->reserved_lkey);
824 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
825 * we can't use any EQs whose doorbell falls on that page,
826 * even if the EQ itself isn't reserved.
828 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
829 dev_cap->reserved_eqs);
831 mlx4_dbg(dev, "Max ICM size %lld MB\n",
832 (unsigned long long) dev_cap->max_icm_sz >> 20);
833 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
834 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
835 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
836 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
837 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
838 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
839 mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
840 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
841 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
842 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
843 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
844 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
845 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
846 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
847 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
848 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
849 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
850 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
851 dev_cap->max_port_width[1]);
852 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
853 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
854 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
855 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
856 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
857 mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
858 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
860 dump_dev_cap_flags(dev, dev_cap->flags);
861 dump_dev_cap_flags2(dev, dev_cap->flags2);
864 mlx4_free_cmd_mailbox(dev, mailbox);
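/*
 * Illustrative sketch: the PF probe path calls this once and derives the
 * working device limits from dev_cap; shown here only to make the calling
 * convention concrete.
 */
#if 0	/* example only */
static int example_query_dev_cap(struct mlx4_dev *dev)
{
	struct mlx4_dev_cap dev_cap;
	int err;

	err = mlx4_QUERY_DEV_CAP(dev, &dev_cap);
	if (err)
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
	return err;
}
#endif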
868 int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
869 struct mlx4_vhcr *vhcr,
870 struct mlx4_cmd_mailbox *inbox,
871 struct mlx4_cmd_mailbox *outbox,
872 struct mlx4_cmd_info *cmd)
881 struct mlx4_active_ports actv_ports;
883 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
884 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
888 /* advertise the port management change event capability to slaves and
889 * disable MW type 1 for them, unconditionally */
891 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
892 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
893 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
894 actv_ports = mlx4_get_active_ports(dev, slave);
895 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
896 for (slave_port = 0, real_port = first_port;
897 real_port < first_port +
898 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
899 ++real_port, ++slave_port) {
900 if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
901 flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
903 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
905 for (; slave_port < dev->caps.num_ports; ++slave_port)
906 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
907 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
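	/*
	 * Worked example (illustrative): if this slave owns physical ports 2
	 * and 3, find_first_bit() returns 1, so the loop above copies
	 * physical port 2's WoL bit into the slave's port-1 flag and
	 * physical port 3's bit into its port-2 flag; the follow-up loop
	 * clears the per-port WoL bits for ports the slave does not own.
	 */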
909 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
911 field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
912 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
914 /* For guests, disable timestamp */
915 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
917 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
919 /* For guests, disable vxlan tunneling */
920 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
922 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
924 /* For guests, report Blueflame disabled */
925 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
927 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
929 /* For guests, disable mw type 2 */
930 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
931 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
932 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
934 /* turn off device-managed steering capability if not enabled */
935 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
936 MLX4_GET(field, outbox->buf,
937 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
939 MLX4_PUT(outbox->buf, field,
940 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
943 /* turn off ipoib managed steering for guests */
944 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
946 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
951 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
952 struct mlx4_vhcr *vhcr,
953 struct mlx4_cmd_mailbox *inbox,
954 struct mlx4_cmd_mailbox *outbox,
955 struct mlx4_cmd_info *cmd)
957 struct mlx4_priv *priv = mlx4_priv(dev);
962 int admin_link_state;
963 int port = mlx4_slave_convert_port(dev, slave,
964 vhcr->in_modifier & 0xFF);
966 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
967 #define MLX4_PORT_LINK_UP_MASK 0x80
968 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
969 #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
974 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
977 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
978 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
981 if (!err && dev->caps.function != slave) {
982 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
983 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
985 /* get port type - currently only eth is enabled */
986 MLX4_GET(port_type, outbox->buf,
987 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
989 /* No link sensing allowed */
990 port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
991 /* set port type to currently operating port type */
992 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
994 admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
995 if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
996 port_type |= MLX4_PORT_LINK_UP_MASK;
997 else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
998 port_type &= ~MLX4_PORT_LINK_UP_MASK;
1000 MLX4_PUT(outbox->buf, port_type,
1001 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
1003 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
1004 short_field = mlx4_get_slave_num_gids(dev, slave, port);
1006 short_field = 1; /* slave max gids */
1007 MLX4_PUT(outbox->buf, short_field,
1008 QUERY_PORT_CUR_MAX_GID_OFFSET);
1010 short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
1011 MLX4_PUT(outbox->buf, short_field,
1012 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
1018 int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
1019 int *gid_tbl_len, int *pkey_tbl_len)
1021 struct mlx4_cmd_mailbox *mailbox;
1026 mailbox = mlx4_alloc_cmd_mailbox(dev);
1027 if (IS_ERR(mailbox))
1028 return PTR_ERR(mailbox);
1030 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
1031 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
1036 outbox = mailbox->buf;
1038 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
1039 *gid_tbl_len = field;
1041 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
1042 *pkey_tbl_len = field;
1045 mlx4_free_cmd_mailbox(dev, mailbox);
1048 EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
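/*
 * Illustrative sketch: the IB driver side can use this export when running
 * as a slave to size its per-port GID/P_Key tables.  A minimal sketch of
 * the calling convention, not the actual mlx4_ib code:
 */
#if 0	/* example only */
static void example_report_tbl_lens(struct mlx4_dev *dev, u8 port)
{
	int gid_tbl_len, pkey_tbl_len;

	if (!mlx4_get_slave_pkey_gid_tbl_len(dev, port,
					     &gid_tbl_len, &pkey_tbl_len))
		mlx4_dbg(dev, "port %d: %d GIDs, %d P_Keys\n",
			 port, gid_tbl_len, pkey_tbl_len);
}
#endif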
1050 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1052 struct mlx4_cmd_mailbox *mailbox;
1053 struct mlx4_icm_iter iter;
1061 mailbox = mlx4_alloc_cmd_mailbox(dev);
1062 if (IS_ERR(mailbox))
1063 return PTR_ERR(mailbox);
1064 pages = mailbox->buf;
1066 for (mlx4_icm_first(icm, &iter);
1067 !mlx4_icm_last(&iter);
1068 mlx4_icm_next(&iter)) {
1070 * We have to pass pages that are aligned to their
1071 * size, so find the least significant 1 in the
1072 * address or size and use that as our log2 size.
1074 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
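		/*
		 * Worked example (illustrative): for a chunk at DMA address
		 * 0x100000 with size 0x40000, addr | size == 0x140000, so
		 * ffs() - 1 == 18 and the chunk is mapped as
		 * 0x40000 >> 18 == 1 page of 256 KB.
		 */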
1075 if (lg < MLX4_ICM_PAGE_SHIFT) {
1076 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
1078 (unsigned long long) mlx4_icm_addr(&iter),
1079 mlx4_icm_size(&iter));
1084 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
1086 pages[nent * 2] = cpu_to_be64(virt);
1090 pages[nent * 2 + 1] =
1091 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
1092 (lg - MLX4_ICM_PAGE_SHIFT));
1093 ts += 1 << (lg - 10);
1096 if (++nent == MLX4_MAILBOX_SIZE / 16) {
1097 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1098 MLX4_CMD_TIME_CLASS_B,
1108 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1109 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1114 case MLX4_CMD_MAP_FA:
1115 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
1117 case MLX4_CMD_MAP_ICM_AUX:
1118 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
1120 case MLX4_CMD_MAP_ICM:
1121 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
1122 tc, ts, (unsigned long long) virt - (ts << 10));
1127 mlx4_free_cmd_mailbox(dev, mailbox);
1131 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
1133 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
1136 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
1138 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
1139 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1143 int mlx4_RUN_FW(struct mlx4_dev *dev)
1145 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
1146 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1149 int mlx4_QUERY_FW(struct mlx4_dev *dev)
1151 struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
1152 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
1153 struct mlx4_cmd_mailbox *mailbox;
1160 #define QUERY_FW_OUT_SIZE 0x100
1161 #define QUERY_FW_VER_OFFSET 0x00
1162 #define QUERY_FW_PPF_ID 0x09
1163 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
1164 #define QUERY_FW_MAX_CMD_OFFSET 0x0f
1165 #define QUERY_FW_ERR_START_OFFSET 0x30
1166 #define QUERY_FW_ERR_SIZE_OFFSET 0x38
1167 #define QUERY_FW_ERR_BAR_OFFSET 0x3c
1169 #define QUERY_FW_SIZE_OFFSET 0x00
1170 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
1171 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
1173 #define QUERY_FW_COMM_BASE_OFFSET 0x40
1174 #define QUERY_FW_COMM_BAR_OFFSET 0x48
1176 #define QUERY_FW_CLOCK_OFFSET 0x50
1177 #define QUERY_FW_CLOCK_BAR 0x58
1179 mailbox = mlx4_alloc_cmd_mailbox(dev);
1180 if (IS_ERR(mailbox))
1181 return PTR_ERR(mailbox);
1182 outbox = mailbox->buf;
1184 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1185 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1189 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
1191 * FW subminor version is at more significant bits than minor
1192 * version, so swap here.
1194 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
1195 ((fw_ver & 0xffff0000ull) >> 16) |
1196 ((fw_ver & 0x0000ffffull) << 16);
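	/*
	 * Worked example (illustrative): firmware 2.11.500 is reported as
	 * 0x000201f4000b (major 2, subminor 0x1f4 == 500, minor 0xb == 11);
	 * the swap above turns it into 0x0002000b01f4 so the
	 * major.minor.subminor print below reads "2.11.500".
	 */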
1198 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
1199 dev->caps.function = lg;
1201 if (mlx4_is_slave(dev))
1205 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
1206 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
1207 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
1208 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
1210 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1211 (int) (dev->caps.fw_ver >> 32),
1212 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1213 (int) dev->caps.fw_ver & 0xffff);
1214 mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
1215 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
1220 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
1221 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
1223 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
1224 cmd->max_cmds = 1 << lg;
1226 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
1227 (int) (dev->caps.fw_ver >> 32),
1228 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1229 (int) dev->caps.fw_ver & 0xffff,
1230 cmd_if_rev, cmd->max_cmds);
1232 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
1233 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
1234 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
1235 fw->catas_bar = (fw->catas_bar >> 6) * 2;
1237 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
1238 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
1240 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
1241 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
1242 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
1243 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
1245 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
1246 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
1247 fw->comm_bar = (fw->comm_bar >> 6) * 2;
1248 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
1249 fw->comm_bar, fw->comm_base);
1250 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
1252 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
1253 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
1254 fw->clock_bar = (fw->clock_bar >> 6) * 2;
1255 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
1256 fw->clock_bar, fw->clock_offset);
1259 * Round up number of system pages needed in case
1260 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1263 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1264 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
1266 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1267 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
1270 mlx4_free_cmd_mailbox(dev, mailbox);
1274 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1275 struct mlx4_vhcr *vhcr,
1276 struct mlx4_cmd_mailbox *inbox,
1277 struct mlx4_cmd_mailbox *outbox,
1278 struct mlx4_cmd_info *cmd)
1283 outbuf = outbox->buf;
1284 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1285 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1289 /* for slaves, set pci PPF ID to invalid and zero out everything
1290 * else except FW version */
1291 outbuf[0] = outbuf[1] = 0;
1292 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
1293 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
1298 static void get_board_id(void *vsd, char *board_id)
1302 #define VSD_OFFSET_SIG1 0x00
1303 #define VSD_OFFSET_SIG2 0xde
1304 #define VSD_OFFSET_MLX_BOARD_ID 0xd0
1305 #define VSD_OFFSET_TS_BOARD_ID 0x20
1307 #define VSD_SIGNATURE_TOPSPIN 0x5ad
1309 memset(board_id, 0, MLX4_BOARD_ID_LEN);
1311 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1312 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1313 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
1316 * The board ID is a string but the firmware byte
1317 * swaps each 4-byte word before passing it back to
1318 * us. Therefore we need to swab it before printing.
1320 for (i = 0; i < 4; ++i)
1321 ((u32 *) board_id)[i] =
1322 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1326 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1328 struct mlx4_cmd_mailbox *mailbox;
1332 #define QUERY_ADAPTER_OUT_SIZE 0x100
1333 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
1334 #define QUERY_ADAPTER_VSD_OFFSET 0x20
1336 mailbox = mlx4_alloc_cmd_mailbox(dev);
1337 if (IS_ERR(mailbox))
1338 return PTR_ERR(mailbox);
1339 outbox = mailbox->buf;
1341 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1342 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1346 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1348 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1352 mlx4_free_cmd_mailbox(dev, mailbox);
1356 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1358 struct mlx4_cmd_mailbox *mailbox;
1362 #define INIT_HCA_IN_SIZE 0x200
1363 #define INIT_HCA_VERSION_OFFSET 0x000
1364 #define INIT_HCA_VERSION 2
1365 #define INIT_HCA_VXLAN_OFFSET 0x0c
1366 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1367 #define INIT_HCA_FLAGS_OFFSET 0x014
1368 #define INIT_HCA_QPC_OFFSET 0x020
1369 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1370 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
1371 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
1372 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
1373 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
1374 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
1375 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
1376 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
1377 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
1378 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
1379 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
1380 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
1381 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
1382 #define INIT_HCA_MCAST_OFFSET 0x0c0
1383 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
1384 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1385 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
1386 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
1387 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1388 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
1389 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0
1390 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1391 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1392 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1393 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1394 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1395 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1396 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1397 #define INIT_HCA_TPT_OFFSET 0x0f0
1398 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
1399 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
1400 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
1401 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
1402 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
1403 #define INIT_HCA_UAR_OFFSET 0x120
1404 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
1405 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
1407 mailbox = mlx4_alloc_cmd_mailbox(dev);
1408 if (IS_ERR(mailbox))
1409 return PTR_ERR(mailbox);
1410 inbox = mailbox->buf;
1412 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1414 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1415 (ilog2(cache_line_size()) - 4) << 5;
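	/*
	 * Worked example (illustrative): with a 64-byte cache line,
	 * ilog2(64) == 6, so (6 - 4) << 5 == 0x40 is written at
	 * INIT_HCA_CACHELINE_SZ_OFFSET; a 128-byte line encodes as 0x60.
	 */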
1417 #if defined(__LITTLE_ENDIAN)
1418 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1419 #elif defined(__BIG_ENDIAN)
1420 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1422 #error Host endianness not defined
1424 /* Check port for UD address vector: */
1425 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1427 /* Enable IPoIB checksumming if we can: */
1428 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1429 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1431 /* Enable QoS support if module parameter set */
1433 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1435 /* enable counters */
1436 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1437 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1439 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1440 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1441 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1442 dev->caps.eqe_size = 64;
1443 dev->caps.eqe_factor = 1;
1445 dev->caps.eqe_size = 32;
1446 dev->caps.eqe_factor = 0;
1449 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1450 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1451 dev->caps.cqe_size = 64;
1452 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
1454 dev->caps.cqe_size = 32;
1457 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1459 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
1460 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
1461 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
1462 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
1463 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
1464 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
1465 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
1466 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
1467 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
1468 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
1469 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
1470 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1472 /* steering attributes */
1473 if (dev->caps.steering_mode ==
1474 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1475 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1477 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1479 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1480 MLX4_PUT(inbox, param->log_mc_entry_sz,
1481 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1482 MLX4_PUT(inbox, param->log_mc_table_sz,
1483 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1484 /* Enable Ethernet flow steering
1485 * with udp unicast and tcp unicast */
1487 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1488 INIT_HCA_FS_ETH_BITS_OFFSET);
1489 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1490 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1491 /* Enable IPoIB flow steering
1492 * with udp unicast and tcp unicast */
1494 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1495 INIT_HCA_FS_IB_BITS_OFFSET);
1496 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1497 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1499 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
1500 MLX4_PUT(inbox, param->log_mc_entry_sz,
1501 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1502 MLX4_PUT(inbox, param->log_mc_hash_sz,
1503 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1504 MLX4_PUT(inbox, param->log_mc_table_sz,
1505 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1506 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
1507 MLX4_PUT(inbox, (u8) (1 << 3),
1508 INIT_HCA_UC_STEERING_OFFSET);
1511 /* TPT attributes */
1513 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
1514 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
1515 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
1516 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
1517 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
1519 /* UAR attributes */
1521 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1522 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
1524 /* set parser VXLAN attributes */
1525 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
1526 u8 parser_params = 0;
1527 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
1530 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
1534 mlx4_err(dev, "INIT_HCA returns %d\n", err);
1536 mlx4_free_cmd_mailbox(dev, mailbox);
1540 int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1541 struct mlx4_init_hca_param *param)
1543 struct mlx4_cmd_mailbox *mailbox;
1550 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
1551 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
1553 mailbox = mlx4_alloc_cmd_mailbox(dev);
1554 if (IS_ERR(mailbox))
1555 return PTR_ERR(mailbox);
1556 outbox = mailbox->buf;
1558 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1560 MLX4_CMD_TIME_CLASS_B,
1561 !mlx4_is_slave(dev));
1565 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
1566 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1568 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1570 MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
1571 param->qpc_base = qword_field & ~((u64)0x1f);
1572 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
1573 param->log_num_qps = byte_field & 0x1f;
1574 MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
1575 param->srqc_base = qword_field & ~((u64)0x1f);
1576 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
1577 param->log_num_srqs = byte_field & 0x1f;
1578 MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
1579 param->cqc_base = qword_field & ~((u64)0x1f);
1580 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
1581 param->log_num_cqs = byte_field & 0x1f;
1582 MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
1583 param->altc_base = qword_field;
1584 MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
1585 param->auxc_base = qword_field;
1586 MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
1587 param->eqc_base = qword_field & ~((u64)0x1f);
1588 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
1589 param->log_num_eqs = byte_field & 0x1f;
1590 MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1591 param->rdmarc_base = qword_field & ~((u64)0x1f);
1592 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
1593 param->log_rd_per_qp = byte_field & 0x7;
1595 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
1596 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
1597 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1599 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
1600 if (byte_field & 0x8)
1601 param->steering_mode = MLX4_STEERING_MODE_B0;
1603 param->steering_mode = MLX4_STEERING_MODE_A0;
1605 /* steering attributes */
1606 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1607 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1608 MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1609 param->log_mc_entry_sz = byte_field & 0x1f;
1610 MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1611 param->log_mc_table_sz = byte_field & 0x1f;
1613 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
1614 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1615 param->log_mc_entry_sz = byte_field & 0x1f;
1616 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1617 param->log_mc_hash_sz = byte_field & 0x1f;
1618 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1619 param->log_mc_table_sz = byte_field & 0x1f;
1622 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1623 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
1624 if (byte_field & 0x20) /* 64-bytes eqe enabled */
1625 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
1626 if (byte_field & 0x40) /* 64-bytes cqe enabled */
1627 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
        /* TPT attributes */

        MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
        MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
        param->mw_enabled = byte_field >> 7;
        MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
        param->log_mpt_sz = byte_field & 0x3f;
        MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
        MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);

        /* UAR attributes */

        MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
        MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
        param->log_uar_sz = byte_field & 0xf;

out:
        mlx4_free_cmd_mailbox(dev, mailbox);

        return err;
}
/* For IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate.
 */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        /* irrelevant if not infiniband */
        if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
            priv->mfunc.master.qp0_state[port].qp0_active)
                return 1;
        return 0;
}
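/*
 * INIT_PORT/CLOSE_PORT are paravirtualized for SRIOV: each slave's request is
 * recorded in its init_port_mask, and the real firmware command is issued only
 * for the first INIT_PORT and the last CLOSE_PORT on a port, tracked through
 * init_port_ref below.
 */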
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
        int err;

        if (port < 0)
                return -EINVAL;

        if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
                return 0;

        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
                /* Enable port only if it was previously disabled */
                if (!priv->mfunc.master.init_port_ref[port]) {
                        err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
                                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
                        if (err)
                                return err;
                }
                priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
        } else {
                if (slave == mlx4_master_func_num(dev)) {
                        if (check_qp0_state(dev, slave, port) &&
                            !priv->mfunc.master.qp0_state[port].port_active) {
                                err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
                                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
                                if (err)
                                        return err;
                                priv->mfunc.master.qp0_state[port].port_active = 1;
                                priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
                        }
                } else {
                        priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
                }
        }
        ++priv->mfunc.master.init_port_ref[port];
        return 0;
}
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
        struct mlx4_cmd_mailbox *mailbox;
        u32 *inbox;
        int err;
        u32 flags;
        u16 field;

        if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE 256
#define INIT_PORT_FLAGS_OFFSET 0x00
#define INIT_PORT_FLAG_SIG (1 << 18)
#define INIT_PORT_FLAG_NG (1 << 17)
#define INIT_PORT_FLAG_G0 (1 << 16)
#define INIT_PORT_VL_SHIFT 4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET 0x04
#define INIT_PORT_MAX_GID_OFFSET 0x06
#define INIT_PORT_MAX_PKEY_OFFSET 0x0a
#define INIT_PORT_GUID0_OFFSET 0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET 0x20

                mailbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR(mailbox))
                        return PTR_ERR(mailbox);
                inbox = mailbox->buf;

                flags = 0;
                flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
                flags |= (dev->caps.port_width_cap[port] & 0xf) <<
                         INIT_PORT_PORT_WIDTH_SHIFT;
                MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

                field = 128 << dev->caps.ib_mtu_cap[port];
                MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
                field = dev->caps.gid_table_len[port];
                MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
                field = dev->caps.pkey_table_len[port];
                MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

                err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

                mlx4_free_cmd_mailbox(dev, mailbox);
        } else {
                err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
        }

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
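/*
 * Illustrative only (not part of this file): protocol drivers layered on top
 * of mlx4_core are expected to pair these calls per port, e.g.
 *
 *      err = mlx4_INIT_PORT(dev, port);
 *      if (err)
 *              return err;
 *      ...
 *      mlx4_CLOSE_PORT(dev, port);
 */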
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_vhcr *vhcr,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox,
                            struct mlx4_cmd_info *cmd)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
        int err;

        if (port < 0)
                return -EINVAL;

        if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
              (1 << port)))
                return 0;

        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
                if (priv->mfunc.master.init_port_ref[port] == 1) {
                        err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
                                       1000, MLX4_CMD_NATIVE);
                        if (err)
                                return err;
                }
                priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
        } else {
                /* infiniband port */
                if (slave == mlx4_master_func_num(dev)) {
                        if (!priv->mfunc.master.qp0_state[port].qp0_active &&
                            priv->mfunc.master.qp0_state[port].port_active) {
                                err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
                                               1000, MLX4_CMD_NATIVE);
                                if (err)
                                        return err;
                                priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
                                priv->mfunc.master.qp0_state[port].port_active = 0;
                        }
                } else {
                        priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
                }
        }
        --priv->mfunc.master.init_port_ref[port];
        return 0;
}
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
        return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
                        MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
        return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, MLX4_CMD_NATIVE);
}
struct mlx4_config_dev {
        __be32 update_flags;
        __be32 rsvd1[3];
        __be16 vxlan_udp_dport;
        __be16 rsvd2;
};

#define MLX4_VXLAN_UDP_DPORT (1 << 0)
static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
        int err;
        struct mlx4_cmd_mailbox *mailbox;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        memcpy(mailbox->buf, config_dev, sizeof(*config_dev));

        err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
        struct mlx4_config_dev config_dev;

        memset(&config_dev, 0, sizeof(config_dev));
        config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
        config_dev.vxlan_udp_dport = udp_port;

        return mlx4_CONFIG_DEV(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
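/*
 * Illustrative only: the UDP destination port is passed already in network
 * byte order, so a caller configuring the IANA-assigned VXLAN port would use
 * something like mlx4_config_vxlan_port(dev, htons(4789)).
 */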
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
        int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
                               MLX4_CMD_SET_ICM_SIZE,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (ret)
                return ret;

        /* Round up number of system pages needed in case
         * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. */
        *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
                (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

        return 0;
}
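/*
 * Worked example for the aux_pages rounding in mlx4_SET_ICM_SIZE() above
 * (illustrative only, assuming 4 KiB ICM pages on a machine with 64 KiB
 * system pages): PAGE_SIZE / MLX4_ICM_PAGE_SIZE = 16 and
 * PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT = 4, so a firmware answer of 100 ICM pages
 * is aligned up to 112 and converted to 112 >> 4 = 7 system pages.
 */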
int mlx4_NOP(struct mlx4_dev *dev)
{
        /* Input modifier of 0x1f means "finish as soon as possible." */
        return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
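/*
 * Descriptive note: mlx4_NOP is used elsewhere in mlx4_core (e.g. when
 * testing interrupts) as a cheap way to exercise the command and event-queue
 * path end to end.
 */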
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
        u8 port;
        u32 *outbox;
        struct mlx4_cmd_mailbox *mailbox;
        u32 in_mod;
        u32 guid_hi, guid_lo;
        int err, ret = 0;
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H 0X14
#define MOD_STAT_CFG_GUID_L 0X1c

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        outbox = mailbox->buf;

        for (port = 1; port <= dev->caps.num_ports; port++) {
                in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
                err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
                                   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_NATIVE);
                if (err) {
                        mlx4_err(dev, "Fail to get port %d uplink guid\n",
                                 port);
                        ret = err;
                } else {
                        MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
                        MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
                        dev->caps.phys_port_id[port] = (u64)guid_lo |
                                                       (u64)guid_hi << 32;
                }
        }
        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
}
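/*
 * Descriptive note: the uplink GUIDs cached in dev->caps.phys_port_id above
 * are what the Ethernet driver reports through its .ndo_get_phys_port_id
 * callback (which lives outside this file), so both ports of a device can be
 * recognized as belonging to the same physical NIC.
 */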
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
        u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

        return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
                            MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
                            MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
        u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

        return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
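/*
 * Firmware can also post requests of its own (for example, asking the driver
 * to attach or detach QPs from a multicast group). The event handler (outside
 * this file) increments opreq_count and schedules the work item below, which
 * drains the pending requests with GET_OP_REQ and acknowledges each one.
 */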
enum {
        ADD_TO_MCG = 0x26,
};

void mlx4_opreq_action(struct work_struct *work)
{
        struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
                                              opreq_task);
        struct mlx4_dev *dev = &priv->dev;
        int num_tasks = atomic_read(&priv->opreq_count);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        struct mlx4_qp qp;
        u32 *outbox;
        u32 modifier, num_qps;
        u16 token, type;
        u8 rem_mcg, prot;
        int err, i;

#define GET_OP_REQ_MODIFIER_OFFSET 0x08
#define GET_OP_REQ_TOKEN_OFFSET 0x14
#define GET_OP_REQ_TYPE_OFFSET 0x1a
#define GET_OP_REQ_DATA_OFFSET 0x20

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
                return;
        }
        outbox = mailbox->buf;

        while (num_tasks) {
                err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
                                   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_NATIVE);
                if (err) {
                        mlx4_err(dev, "Failed to retrieve required operation: %d\n", err);
                        goto out;
                }
                MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
                MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
                MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
                type &= 0xfff;

                switch (type) {
                case ADD_TO_MCG:
                        if (dev->caps.steering_mode ==
                            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                                mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
                                err = EPERM;
                                break;
                        }
                        mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
                                                  GET_OP_REQ_DATA_OFFSET);
                        num_qps = be32_to_cpu(mgm->members_count) & MGM_QPN_MASK;
                        rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
                        prot = ((u8 *)(&mgm->members_count))[0] >> 6;

                        for (i = 0; i < num_qps; i++) {
                                qp.qpn = be32_to_cpu(mgm->qp[i]);
                                if (rem_mcg)
                                        err = mlx4_multicast_detach(dev, &qp,
                                                                    mgm->gid, prot, 0);
                                else
                                        err = mlx4_multicast_attach(dev, &qp,
                                                                    mgm->gid, mgm->gid[5],
                                                                    0, prot, NULL);
                                if (err)
                                        break;
                        }
                        break;
                default:
                        mlx4_warn(dev, "Bad type for required operation\n");
                        err = EINVAL;
                        break;
                }
                err = mlx4_cmd(dev, 0, ((u32) err |
                                        (__force u32)cpu_to_be32(token) << 16),
                               1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
                               MLX4_CMD_NATIVE);
                if (err) {
                        mlx4_err(dev, "Failed to acknowledge required request: %d\n", err);
                        goto out;
                }
                memset(outbox, 0, 0xffc);
                num_tasks = atomic_dec_return(&priv->opreq_count);
        }

out:
        mlx4_free_cmd_mailbox(dev, mailbox);
}