/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
	MLX5E_PROMISC   = 2,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};
struct mlx5e_l2_hash_node {
	struct hlist_node    hlist;
	u8                   action;
	struct mlx5e_l2_rule ai;
};
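
/* The netdev UC/MC address lists are mirrored into small hash tables of
 * mlx5e_l2_hash_node entries.  Each node carries a pending action
 * (ADD/DEL/NONE) so that a later reconciliation pass can bring the
 * hardware L2 flow rules in sync with the current software state.
 */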
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);

	/* Already tracked: cancel any pending action and keep the entry. */
	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			hn->action = MLX5E_ACTION_NONE;
			return;
		}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}
static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size = 0;
	u16 *vlans;
	int vlan;
	int err;
	int i = 0;

	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}
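
/* The VLAN steering table holds one rule per type below: an untagged
 * catch-all, "any C-tag"/"any S-tag" rules used while VLAN filtering is
 * disabled (or promiscuous mode is on), and one rule per active VID.
 * All of them forward to the L2 (DMAC) table.
 */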
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->fs.vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
			priv->fs.vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_any_vid_rules(priv);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_any_vid_rules(priv);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->fs.vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->fs.vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}
static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_add_any_vid_rules(priv);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_del_any_vid_rules(priv);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}
}
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
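
/* Deferred rx-mode handler: compares the desired promisc/allmulti/broadcast
 * state derived from the netdev flags with the currently programmed state,
 * adds or removes the corresponding L2 rules, and then pushes the resulting
 * address lists and promisc settings to the NIC vport context.
 */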
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}
void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}
static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rules(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}
}
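
/* Traffic type classification: each entry maps an Ethertype (and optionally
 * an IP protocol) to a traffic type.  The generated rules steer matching
 * packets to the RSS TIR of that traffic type, with MLX5E_TT_ANY acting as
 * the catch-all that goes to the first direct TIR.
 */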
static struct {
	u16 etype;
	u8  proto;
} ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP]       = { .etype = ETH_P_IP,   .proto = IPPROTO_TCP },
	[MLX5E_TT_IPV6_TCP]       = { .etype = ETH_P_IPV6, .proto = IPPROTO_TCP },
	[MLX5E_TT_IPV4_UDP]       = { .etype = ETH_P_IP,   .proto = IPPROTO_UDP },
	[MLX5E_TT_IPV6_UDP]       = { .etype = ETH_P_IPV6, .proto = IPPROTO_UDP },
	[MLX5E_TT_IPV4_IPSEC_AH]  = { .etype = ETH_P_IP,   .proto = IPPROTO_AH },
	[MLX5E_TT_IPV6_IPSEC_AH]  = { .etype = ETH_P_IPV6, .proto = IPPROTO_AH },
	[MLX5E_TT_IPV4_IPSEC_ESP] = { .etype = ETH_P_IP,   .proto = IPPROTO_ESP },
	[MLX5E_TT_IPV6_IPSEC_ESP] = { .etype = ETH_P_IPV6, .proto = IPPROTO_ESP },
	[MLX5E_TT_IPV4]           = { .etype = ETH_P_IP,   .proto = 0 },
	[MLX5E_TT_IPV6]           = { .etype = ETH_P_IPV6, .proto = 0 },
	[MLX5E_TT_ANY]            = { .etype = 0,          .proto = 0 },
};
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}
	if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ttc = &priv->fs.ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->indir_tir[tt].tirn;
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
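
/* The TTC table is split into three flow groups sized for the rule shapes
 * used above: group 1 (8 entries) matches ethertype + ip_protocol, group 2
 * (2 entries) matches ethertype only, and group 3 (1 entry) is the
 * unmatched catch-all.
 */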
#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}
int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr.level = MLX5E_TTC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}
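
/* The L2 (DMAC) table uses three flow groups matching the rule types added
 * by mlx5e_add_l2_flow_rule(): a single-entry promiscuous group with no
 * match criteria, a large full-DMAC-match group for unicast and broadcast
 * addresses, and a single-entry group matching only the multicast bit.
 */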
#define MLX5E_NUM_L2_GROUPS	3
#define MLX5E_L2_GROUP1_SIZE	BIT(0)
#define MLX5E_L2_GROUP2_SIZE	BIT(15)
#define MLX5E_L2_GROUP3_SIZE	BIT(0)
#define MLX5E_L2_TABLE_SIZE	(MLX5E_L2_GROUP1_SIZE +\
				 MLX5E_L2_GROUP2_SIZE +\
				 MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
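
/* The VLAN table groups mirror the rule types in __mlx5e_add_vlan_rule():
 * a 4K-entry group matching C-tag presence + VID, a two-entry group
 * matching only the C-tag presence bit (untagged / any C-tag), and a
 * one-entry group matching the S-tag presence bit.
 */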
#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int ix = 0;
	int err;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
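
/* Build the receive steering pipeline from the last hop backwards (aRFS,
 * then TTC, then L2, then VLAN) so that every table already has its
 * destination in place when rules pointing to it are installed; teardown
 * runs in the opposite order.
 */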
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}