/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "lib/mpfs.h"
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);
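
/* Unicast/multicast addresses taken from the netdev are tracked in small
 * hash tables of mlx5e_l2_hash_node entries; each node's 'action' field
 * records whether a steering rule still has to be added or removed for
 * that address on the next RX-mode sync.
 */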
enum mlx5e_action {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD,
	MLX5E_ACTION_DEL,
};
struct mlx5e_l2_hash_node {
	struct hlist_node	   hlist;
	u8			   action;
	struct mlx5e_l2_rule	   ai;
	bool			   mpfs;
};
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			hn->action = MLX5E_ACTION_NONE;
			return;
		}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}
static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
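
/* The NIC vport context holds its own VLAN allow-list, bounded by the
 * log_max_vlan_list capability. It is kept in sync with the C-VLANs the
 * netdev has enabled; when the netdev list exceeds the firmware limit,
 * the tail of the list is dropped and a warning is printed.
 */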
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i = 0;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}
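
/* Five kinds of VLAN steering rules are maintained: one for untagged
 * traffic, catch-all rules for "any C-tag" and "any S-tag", and per-VID
 * rules for individual C-VLANs and S-VLANs.
 */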
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
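
/* Build one VLAN steering rule. All rules match on the outer headers
 * (cvlan_tag/svlan_tag and, for per-VID rules, first_vid) and forward
 * matching packets to the L2 (DMAC) flow table.
 */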
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		rule_p = &priv->fs.vlan.active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		if (priv->fs.vlan.active_svlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
			priv->fs.vlan.active_svlans_rule[vid] = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		if (priv->fs.vlan.active_cvlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
			priv->fs.vlan.active_cvlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
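
/* C-VLAN filtering on/off. When filtering is disabled (rx-vlan-filter off),
 * catch-all "any VID" rules accept all tagged traffic; they are skipped in
 * promiscuous mode, where equivalent rules are installed anyway.
 */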
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
	int err;

	set_bit(vid, priv->fs.vlan.active_cvlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, priv->fs.vlan.active_cvlans);

	return err;
}

static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
{
	struct net_device *netdev = priv->netdev;
	int err;

	set_bit(vid, priv->fs.vlan.active_svlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, priv->fs.vlan.active_svlans);
		return err;
	}

	/* Need to fix some features.. */
	netdev_update_features(netdev);
	return err;
}
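
/* ndo_vlan_rx_add_vid / ndo_vlan_rx_kill_vid entry points: dispatch on the
 * VLAN protocol, 802.1Q VIDs go to the C-VLAN rules and 802.1ad VIDs to
 * the S-VLAN rules.
 */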
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (be16_to_cpu(proto) == ETH_P_8021Q)
		return mlx5e_vlan_rx_add_cvid(priv, vid);
	else if (be16_to_cpu(proto) == ETH_P_8021AD)
		return mlx5e_vlan_rx_add_svid(priv, vid);

	return -EOPNOTSUPP;
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (be16_to_cpu(proto) == ETH_P_8021Q) {
		clear_bit(vid, priv->fs.vlan.active_cvlans);
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
		clear_bit(vid, priv->fs.vlan.active_svlans);
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
		netdev_update_features(dev);
	}

	return 0;
}
static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (priv->fs.vlan.cvlan_filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_add_any_vid_rules(priv);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (priv->fs.vlan.cvlan_filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_del_any_vid_rules(priv);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
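
/* Apply a pending address action: install or remove the L2 steering rule
 * for the address and, for unicast MACs, mirror the change into the device
 * MPFS (MAC filter) table so that traffic for the address is forwarded to
 * this vport.
 */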
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
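
/* Snapshot the netdev address lists (own MAC, UC and MC addresses) into the
 * driver hash tables under the netdev address lock; the actual rule updates
 * are applied later, outside the lock.
 */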
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
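
/* Push the UC or MC address list into the NIC vport context. The list is
 * truncated to the firmware limit (log_max_current_uc_list /
 * log_max_current_mc_list), with a warning when the netdev has more
 * addresses than the device can filter.
 */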
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);

	mlx5e_fill_addr_array(priv, list_type, addr_array, size);

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
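
/* Deferred from ndo_set_rx_mode. Compute the delta between the current and
 * requested promiscuous/allmulti/broadcast state, add or remove the
 * corresponding L2 rules, mark every hashed address for deletion and
 * re-sync it from the netdev, then update the NIC vport context.
 */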
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		if (!priv->channels.params.vlan_strip_disable)
			netdev_warn_once(ndev,
					 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rules(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}

	for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}
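
/* Traffic type classification (TTC): each traffic type is identified by an
 * ethertype plus, for L4/IPsec types, an IP protocol number. The tables
 * below drive creation of one steering rule per traffic type.
 */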
struct mlx5e_etype_proto {
	u16 etype;
	u8 proto;
};

static struct mlx5e_etype_proto ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
};

static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
	[MLX5E_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
};
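
/* Map an ethertype to the value carried in the ip_version header field:
 * 4 for IPv4, 6 for IPv6, 0 for non-IP ethertypes.
 */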
static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}
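
/* Build one TTC rule: always match the outer IP protocol, and identify the
 * IP version either via the outer ip_version field when the device supports
 * matching on it, or via the ethertype otherwise. The rule forwards to the
 * given destination (a TIR, or the inner-TTC table for tunnel types).
 */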
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}

	ipv = mlx5e_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
					  struct ttc_params *params,
					  struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	rules = ttc->tunnel_rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = params->inner_ttc->ft.t;
	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
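
/* TTC flow group layout: group 1 holds the ethertype+protocol rules
 * (including the tunnel traffic types), group 2 the ethertype-only rules,
 * and group 3 the single catch-all rule with an empty match.
 */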
#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	(BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE	 BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	 BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)

#define MLX5E_INNER_TTC_NUM_GROUPS	3
#define MLX5E_INNER_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE	(MLX5E_INNER_TTC_GROUP1_SIZE +\
					 MLX5E_INNER_TTC_GROUP2_SIZE +\
					 MLX5E_INNER_TTC_GROUP3_SIZE)
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					 bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);

	in = kvzalloc(inlen, GFP_KERNEL);

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
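
/* Inner TTC: the same classification as above, but on the inner headers of
 * tunnelled packets, used to spread decapsulated traffic across the
 * inner-indirection TIRs.
 */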
static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_destination *dest,
			      u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5e_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
						struct ttc_params *params,
						struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];

		rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);

	in = kvzalloc(inlen, GFP_KERNEL);

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
				struct ttc_params *ttc_params)
{
	ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
	ttc_params->inner_ttc = &priv->fs.inner_ttc;
}

void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;

	ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;
}

void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;
}
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
				 struct mlx5e_ttc_table *ttc)
{
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_inner_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
				   struct mlx5e_ttc_table *ttc)
{
	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
			     struct mlx5e_ttc_table *ttc)
{
	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
			   struct mlx5e_ttc_table *ttc)
{
	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, ai->addr);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}
#define MLX5E_NUM_L2_GROUPS	   3
#define MLX5E_L2_GROUP1_SIZE	   BIT(0)
#define MLX5E_L2_GROUP2_SIZE	   BIT(15)
#define MLX5E_L2_GROUP3_SIZE	   BIT(0)
#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
				    MLX5E_L2_GROUP2_SIZE +\
				    MLX5E_L2_GROUP3_SIZE)
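
/* L2 (DMAC) table layout: a single promiscuous catch-all entry, a large
 * group of exact destination-MAC matches, and a single all-multicast entry
 * that matches only the multicast bit of the DMAC.
 */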
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);

	in = kvzalloc(inlen, GFP_KERNEL);

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	return err;
}
#define MLX5E_NUM_VLAN_GROUPS	4
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 MLX5E_VLAN_GROUP3_SIZE)
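
/* VLAN table layout: 4K entries matching C-tag + VID, 4K entries matching
 * S-tag + VID, two entries matching on the C-tag presence bit alone
 * (untagged and "any C-tag"), and one entry matching on the S-tag presence
 * bit ("any S-tag").
 */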
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
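
/* RX steering pipeline: VLAN table -> L2 (DMAC) table -> TTC (plus inner
 * TTC for tunnelled traffic and aRFS tables when enabled) -> TIRs. Tables
 * are created from the bottom up so that each table's destination already
 * exists, and destroyed in the reverse order.
 */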
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);
	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	mlx5e_set_ttc_basic_params(priv, &ttc_params);
	mlx5e_set_inner_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}