/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

#define QEDE_FILTER_PRINT_MAX_LEN       (64)
struct qede_arfs_tuple {
        union {
                __be32 src_ipv4;
                struct in6_addr src_ipv6;
        };
        union {
                __be32 dst_ipv4;
                struct in6_addr dst_ipv6;
        };
        __be16  src_port;
        __be16  dst_port;
        __be16  eth_proto;
        u8      ip_proto;

        /* Describe filtering mode needed for this kind of filter */
        enum qed_filter_config_mode mode;

        /* Used to compare new/old filters. Return true if IPs match */
        bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);
        /* Given a pointer to an ethhdr buffer, build a header from tuple info */
        void (*build_hdr)(struct qede_arfs_tuple *t, void *header);

        /* Stringify the tuple for a print into the provided buffer */
        void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID  0
        unsigned long state;

        /* pointer to aRFS packet buffer */
        void *data;

        /* dma map address of aRFS packet buffer */
        dma_addr_t mapping;

        /* length of aRFS packet buffer */
        int buf_len;
        /* tuple extracted from the aRFS packet buffer */
        struct qede_arfs_tuple tuple;

        u32 flow_id;
        u64 sw_id;
        u16 rxq_id;
        u16 next_rxq_id;
        u8 vfid;
        bool filter_op;
        bool used;
        u8 fw_rc;
        bool b_is_drop;
        struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
#define QEDE_ARFS_POLL_COUNT    100
#define QEDE_RFS_FLW_BITSHIFT   (4)
#define QEDE_RFS_FLW_MASK       ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
        struct hlist_head       arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

        /* lock for filter list access */
        spinlock_t              arfs_list_lock;
        unsigned long           *arfs_fltr_bmap;
        int                     filter_count;

        /* Currently configured filtering mode */
        enum qed_filter_config_mode mode;
};

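/* Flow rules live in a small hash table: the bucket index is taken from
 * the low QEDE_RFS_FLW_BITSHIFT bits of the skb hash, i.e.
 *
 *	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
 *
 * and collisions are resolved by walking the per-bucket hlist (see
 * qede_arfs_htbl_key_search() below).
 */
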
static void qede_configure_arfs_fltr(struct qede_dev *edev,
                                     struct qede_arfs_fltr_node *n,
                                     u16 rxq_id, bool add_fltr)
{
        const struct qed_eth_ops *op = edev->ops;
        struct qed_ntuple_filter_params params;

        if (n->used)
                return;

        memset(&params, 0, sizeof(params));

        params.addr = n->mapping;
        params.length = n->buf_len;
        params.qid = rxq_id;
        params.b_is_add = add_fltr;
        params.b_is_drop = n->b_is_drop;

        if (n->vfid) {
                params.b_is_vf = true;
                params.vf_id = n->vfid - 1;
        }

        if (n->tuple.stringify) {
                char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];

                n->tuple.stringify(&n->tuple, tuple_buffer);
                DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
                           "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
                           add_fltr ? "Adding" : "Deleting",
                           n->sw_id, tuple_buffer, n->vfid, rxq_id);
        }

        n->used = true;
        n->filter_op = add_fltr;
        op->ntuple_filter_config(edev->cdev, n, &params);
}

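/* Note that ntuple_filter_config() above is asynchronous: the result
 * appears to arrive later through the qede_arfs_filter_op() callback,
 * which is why qede_configure_arfs_fltr() marks the node 'used' and
 * refuses to re-submit it until that callback clears the flag.
 */
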
static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
        kfree(fltr->data);

        if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
                clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);

        kfree(fltr);
}

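/* The device's RFS searcher is only armed while rules exist:
 * qede_enqueue_fltr_and_config_searcher() enables it when the first
 * filter is queued, and qede_dequeue_fltr_and_config_searcher() returns
 * it to QED_FILTER_CONFIG_MODE_DISABLE once the last filter is gone.
 */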
static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
                                      struct qede_arfs_fltr_node *fltr,
                                      u16 bucket_idx)
{
        fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
                                       fltr->buf_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
                DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
                qede_free_arfs_filter(edev, fltr);
                return -ENOMEM;
        }

        INIT_HLIST_NODE(&fltr->node);
        hlist_add_head(&fltr->node,
                       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));

        edev->arfs->filter_count++;
        if (edev->arfs->filter_count == 1 &&
            edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
                edev->ops->configure_arfs_searcher(edev->cdev,
                                                   fltr->tuple.mode);
                edev->arfs->mode = fltr->tuple.mode;
        }

        return 0;
}

static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
                                      struct qede_arfs_fltr_node *fltr)
{
        hlist_del(&fltr->node);
        dma_unmap_single(&edev->pdev->dev, fltr->mapping,
                         fltr->buf_len, DMA_TO_DEVICE);

        qede_free_arfs_filter(edev, fltr);

        edev->arfs->filter_count--;
        if (!edev->arfs->filter_count &&
            edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
                enum qed_filter_config_mode mode;

                mode = QED_FILTER_CONFIG_MODE_DISABLE;
                edev->ops->configure_arfs_searcher(edev->cdev, mode);
                edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
        }
}

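/* Completion handler for a filter configuration request.  On failure
 * the rule is simply invalidated.  On success, a pending rxq change
 * (rxq_id != next_rxq_id) is applied in two steps: a completed add
 * triggers removal from the old queue, and a completed delete triggers
 * re-adding the rule on the new queue.
 */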
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
        struct qede_arfs_fltr_node *fltr = filter;
        struct qede_dev *edev = dev;

        fltr->fw_rc = fw_rc;

        if (fw_rc) {
                DP_NOTICE(edev,
                          "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
                          fw_rc, fltr->flow_id, fltr->sw_id,
                          ntohs(fltr->tuple.src_port),
                          ntohs(fltr->tuple.dst_port), fltr->rxq_id);

                spin_lock_bh(&edev->arfs->arfs_list_lock);

                fltr->used = false;
                clear_bit(QEDE_FLTR_VALID, &fltr->state);

                spin_unlock_bh(&edev->arfs->arfs_list_lock);
                return;
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        fltr->used = false;

        if (fltr->filter_op) {
                set_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id)
                        qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
                                                 false);
        } else {
                clear_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id) {
                        fltr->rxq_id = fltr->next_rxq_id;
                        qede_configure_arfs_fltr(edev, fltr,
                                                 fltr->rxq_id, true);
                }
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
        int i;

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
                struct hlist_node *temp;
                struct hlist_head *head;
                struct qede_arfs_fltr_node *fltr;

                head = &edev->arfs->arfs_hl_head[i];

                hlist_for_each_entry_safe(fltr, temp, head, node) {
                        bool del = false;

                        if (edev->state != QEDE_STATE_OPEN)
                                del = true;

                        spin_lock_bh(&edev->arfs->arfs_list_lock);

                        if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
                             !fltr->used) || free_fltr) {
                                qede_dequeue_fltr_and_config_searcher(edev,
                                                                      fltr);
                        } else {
                                bool flow_exp = false;
#ifdef CONFIG_RFS_ACCEL
                                flow_exp = rps_may_expire_flow(edev->ndev,
                                                               fltr->rxq_id,
                                                               fltr->flow_id,
                                                               fltr->sw_id);
#endif
                                if ((flow_exp || del) && !free_fltr)
                                        qede_configure_arfs_fltr(edev, fltr,
                                                                 fltr->rxq_id,
                                                                 false);
                        }

                        spin_unlock_bh(&edev->arfs->arfs_list_lock);
                }
        }

#ifdef CONFIG_RFS_ACCEL
        spin_lock_bh(&edev->arfs->arfs_list_lock);

        if (edev->arfs->filter_count) {
                set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
                schedule_delayed_work(&edev->sp_task,
                                      QEDE_SP_TASK_POLL_DELAY);
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
#endif
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout (QEDE_ARFS_POLL_COUNT iterations of 100 msec, i.e. ~10
 * seconds) it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
        int count = QEDE_ARFS_POLL_COUNT;

        while (count) {
                qede_process_arfs_filters(edev, false);

                if (!edev->arfs->filter_count)
                        break;

                msleep(100);
                count--;
        }

        if (!count) {
                DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

                /* Something is terribly wrong, free forcefully */
                qede_process_arfs_filters(edev, true);
        }
}

int qede_alloc_arfs(struct qede_dev *edev)
{
        int i;

        edev->arfs = vzalloc(sizeof(*edev->arfs));
        if (!edev->arfs)
                return -ENOMEM;

        spin_lock_init(&edev->arfs->arfs_list_lock);

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
                INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));

        edev->arfs->arfs_fltr_bmap =
                vzalloc(array_size(sizeof(long),
                                   BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
        if (!edev->arfs->arfs_fltr_bmap) {
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }

#ifdef CONFIG_RFS_ACCEL
        edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
        if (!edev->ndev->rx_cpu_rmap) {
                vfree(edev->arfs->arfs_fltr_bmap);
                edev->arfs->arfs_fltr_bmap = NULL;
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }
#endif
        return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
        if (!edev->arfs)
                return;

#ifdef CONFIG_RFS_ACCEL
        if (edev->ndev->rx_cpu_rmap)
                free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

        edev->ndev->rx_cpu_rmap = NULL;
#endif
        vfree(edev->arfs->arfs_fltr_bmap);
        edev->arfs->arfs_fltr_bmap = NULL;
        vfree(edev->arfs);
        edev->arfs = NULL;
}

#ifdef CONFIG_RFS_ACCEL
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
                                 const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
                    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
                        return true;
                else
                        return false;
        } else {
                struct in6_addr *src = &tpos->tuple.src_ipv6;
                u8 size = sizeof(struct in6_addr);

                if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
                    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
                        return true;
                else
                        return false;
        }
}

static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
                          __be16 src_port, __be16 dst_port, u8 ip_proto)
{
        struct qede_arfs_fltr_node *tpos;

        hlist_for_each_entry(tpos, h, node)
                if (tpos->tuple.ip_proto == ip_proto &&
                    tpos->tuple.eth_proto == skb->protocol &&
                    qede_compare_ip_addr(tpos, skb) &&
                    tpos->tuple.src_port == src_port &&
                    tpos->tuple.dst_port == dst_port)
                        return tpos;

        return NULL;
}

static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
        struct qede_arfs_fltr_node *n;
        int bit_id;

        bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
                                     QEDE_RFS_MAX_FLTR);

        if (bit_id >= QEDE_RFS_MAX_FLTR)
                return NULL;

        n = kzalloc(sizeof(*n), GFP_ATOMIC);
        if (!n)
                return NULL;

        n->data = kzalloc(min_hlen, GFP_ATOMIC);
        if (!n->data) {
                kfree(n);
                return NULL;
        }

        n->sw_id = (u16)bit_id;
        set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
        return n;
}

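/* aRFS (.ndo_rx_flow_steer) entry point: steer the flow carried by
 * @skb to @rxq_index.  Returns the rule's sw_id on success or a
 * negative errno.  Only non-encapsulated TCP/UDP over IPv4/IPv6 is
 * supported; a minimal copy of the packet headers is kept in n->data
 * and later DMA-mapped so the device searcher can be programmed
 * with it.
 */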
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_arfs_fltr_node *n;
        int min_hlen, rc, tp_offset;
        struct ethhdr *eth;
        __be16 *ports;
        u16 tbl_idx;
        u8 ip_proto;

        if (skb->encapsulation)
                return -EPROTONOSUPPORT;

        if (skb->protocol != htons(ETH_P_IP) &&
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        if (skb->protocol == htons(ETH_P_IP)) {
                ip_proto = ip_hdr(skb)->protocol;
                tp_offset = sizeof(struct iphdr);
        } else {
                ip_proto = ipv6_hdr(skb)->nexthdr;
                tp_offset = sizeof(struct ipv6hdr);
        }

        if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
                return -EPROTONOSUPPORT;

        ports = (__be16 *)(skb->data + tp_offset);
        tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
                                      skb, ports[0], ports[1], ip_proto);
        if (n) {
                /* Filter match */
                n->next_rxq_id = rxq_index;

                if (test_bit(QEDE_FLTR_VALID, &n->state)) {
                        if (n->rxq_id != rxq_index)
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         false);
                } else {
                        if (!n->used) {
                                n->rxq_id = rxq_index;
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         true);
                        }
                }

                rc = n->sw_id;
                goto ret_unlock;
        }

        min_hlen = ETH_HLEN + skb_headlen(skb);

        n = qede_alloc_filter(edev, min_hlen);
        if (!n) {
                rc = -ENOMEM;
                goto ret_unlock;
        }

        n->buf_len = min_hlen;
        n->rxq_id = rxq_index;
        n->next_rxq_id = rxq_index;
        n->tuple.src_port = ports[0];
        n->tuple.dst_port = ports[1];
        n->flow_id = flow_id;

        if (skb->protocol == htons(ETH_P_IP)) {
                n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
                n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
        } else {
                memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
                       sizeof(struct in6_addr));
                memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
                       sizeof(struct in6_addr));
        }

        eth = (struct ethhdr *)n->data;
        eth->h_proto = skb->protocol;
        n->tuple.eth_proto = skb->protocol;
        n->tuple.ip_proto = ip_proto;
        n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
        memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

        rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
        if (rc)
                goto ret_unlock;

        qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

        spin_unlock_bh(&edev->arfs->arfs_list_lock);

        set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);

        return n->sw_id;

ret_unlock:
        spin_unlock_bh(&edev->arfs->arfs_list_lock);
        return rc;
}
#endif

void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
        struct qede_dev *edev = dev;

        if (edev->vxlan_dst_port != vxlan_port)
                edev->vxlan_dst_port = 0;

        if (edev->geneve_dst_port != geneve_port)
                edev->geneve_dst_port = 0;
}

void qede_force_mac(void *dev, u8 *mac, bool forced)
{
        struct qede_dev *edev = dev;

        __qede_lock(edev);

        if (!is_valid_ether_addr(mac)) {
                __qede_unlock(edev);
                return;
        }

        ether_addr_copy(edev->ndev->dev_addr, mac);
        __qede_unlock(edev);
}

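/* When no valid RSS indirection table exists yet, qede_fill_rss_params()
 * seeds one with the ethtool default spread, which assigns rx queues
 * round-robin:
 *
 *	edev->rss_ind_table[i] = ethtool_rxfh_indir_default(i, val);
 *
 * where ethtool_rxfh_indir_default(i, n) is simply i % n.
 */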
void qede_fill_rss_params(struct qede_dev *edev,
                          struct qed_update_vport_rss_params *rss, u8 *update)
{
        bool need_reset = false;
        int i;

        if (QEDE_RSS_COUNT(edev) <= 1) {
                memset(rss, 0, sizeof(*rss));
                *update = 0;
                return;
        }

        /* Need to validate current RSS config uses valid entries */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
                        need_reset = true;
                        break;
                }
        }

        if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
                for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                        u16 indir_val, val;

                        val = QEDE_RSS_COUNT(edev);
                        indir_val = ethtool_rxfh_indir_default(i, val);
                        edev->rss_ind_table[i] = indir_val;
                }
                edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
        }

        /* Now that we have the queue-indirection, prepare the handles */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

                rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
        }

        if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
                netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
                edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
        }
        memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

        if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
                edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
                    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
                edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
        }
        rss->rss_caps = edev->rss_caps;

        *update = 1;
}

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char mac[ETH_ALEN])
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.mac_valid = 1;
        ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
                                  enum qed_filter_xcast_params_type opcode,
                                  u16 vid)
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.vlan_valid = 1;
        filter_cmd.filter.ucast.vlan = vid;

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
        struct qed_update_vport_params *params;
        int rc;

        /* Proceed only if action actually needs to be performed */
        if (edev->accept_any_vlan == action)
                return 0;

        params = vzalloc(sizeof(*params));
        if (!params)
                return -ENOMEM;

        params->vport_id = 0;
        params->accept_any_vlan = action;
        params->update_accept_any_vlan_flg = 1;

        rc = edev->ops->vport_update(edev->cdev, params);
        if (rc) {
                DP_ERR(edev, "Failed to %s accept-any-vlan\n",
                       action ? "enable" : "disable");
        } else {
                DP_INFO(edev, "%s accept-any-vlan\n",
                        action ? "enabled" : "disabled");
                edev->accept_any_vlan = action;
        }

        vfree(params);
        return 0;
}

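/* VLAN filters are a limited hardware resource
 * (edev->dev_info.num_vlan_filters).  vlan0 always has a reserved
 * filter; once the quota is exhausted, additional VLANs are tracked as
 * 'non_configured' and the vport falls back to accept-any-VLAN mode
 * until enough filters are freed.
 */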
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan, *tmp;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan) {
                DP_INFO(edev, "Failed to allocate struct for vlan\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&vlan->list);
        vlan->vid = vid;
        vlan->configured = false;

        /* Verify vlan isn't already configured */
        list_for_each_entry(tmp, &edev->vlan_list, list) {
                if (tmp->vid == vlan->vid) {
                        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                                   "vlan already configured\n");
                        kfree(vlan);
                        return -EEXIST;
                }
        }

        /* If interface is down, cache this VLAN ID and return */
        __qede_lock(edev);
        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, VLAN %d will be configured when interface is up\n",
                           vid);
                if (vid != 0)
                        edev->non_configured_vlans++;
                list_add(&vlan->list, &edev->vlan_list);
                goto out;
        }

        /* Check for the filter limit.
         * Note - vlan0 has a reserved filter and can be added without
         * worrying about quota
         */
        if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
            (vlan->vid == 0)) {
                rc = qede_set_ucast_rx_vlan(edev,
                                            QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %d\n",
                               vlan->vid);
                        kfree(vlan);
                        goto out;
                }
                vlan->configured = true;
                /* vlan0 filter doesn't consume our quota */
                if (vlan->vid != 0)
                        edev->configured_vlans++;
        } else {
                /* Out of quota; Activate accept-any-VLAN mode */
                if (!edev->non_configured_vlans) {
                        rc = qede_config_accept_any_vlan(edev, true);
                        if (rc) {
                                kfree(vlan);
                                goto out;
                        }
                }

                edev->non_configured_vlans++;
        }

        list_add(&vlan->list, &edev->vlan_list);

out:
        __qede_unlock(edev);
        return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
                                    struct qede_vlan *vlan)
{
        /* vlan0 filter doesn't consume our quota */
        if (vlan->vid != 0) {
                if (vlan->configured)
                        edev->configured_vlans--;
                else
                        edev->non_configured_vlans--;
        }

        list_del(&vlan->list);
        kfree(vlan);
}

int qede_configure_vlan_filters(struct qede_dev *edev)
{
        int rc = 0, real_rc = 0, accept_any_vlan = 0;
        struct qed_dev_eth_info *dev_info;
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return 0;

        dev_info = &edev->dev_info;

        /* Configure non-configured vlans */
        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (vlan->configured)
                        continue;

                /* We have used all our credits, now enable accept_any_vlan */
                if ((vlan->vid != 0) &&
                    (edev->configured_vlans == dev_info->num_vlan_filters)) {
                        accept_any_vlan = 1;
                        continue;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %u\n",
                               vlan->vid);
                        real_rc = rc;
                        continue;
                }

                vlan->configured = true;
                /* vlan0 filter doesn't consume our VLAN filter's quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans--;
                        edev->configured_vlans++;
                }
        }

        /* enable accept_any_vlan mode if we have more VLANs than credits,
         * or remove accept_any_vlan mode if we've actually removed
         * a non-configured vlan, and all remaining vlans are truly configured.
         */

        if (accept_any_vlan)
                rc = qede_config_accept_any_vlan(edev, true);
        else if (!edev->non_configured_vlans)
                rc = qede_config_accept_any_vlan(edev, false);

        if (rc && !real_rc)
                real_rc = rc;

        return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan = NULL;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

        /* Find whether entry exists */
        __qede_lock(edev);
        list_for_each_entry(vlan, &edev->vlan_list, list)
                if (vlan->vid == vid)
                        break;

        if (!vlan || (vlan->vid != vid)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "Vlan isn't configured\n");
                goto out;
        }

        if (edev->state != QEDE_STATE_OPEN) {
                /* As interface is already down, we don't have a VPORT
                 * instance to remove vlan filter. So just update vlan list
                 */
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, removing VLAN from list only\n");
                qede_del_vlan_from_list(edev, vlan);
                goto out;
        }

        /* Remove vlan */
        if (vlan->configured) {
                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
                                            vid);
                if (rc) {
                        DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
                        goto out;
                }
        }

        qede_del_vlan_from_list(edev, vlan);

        /* We have removed a VLAN - try to see if we can
         * configure non-configured VLAN from the list.
         */
        rc = qede_configure_vlan_filters(edev);

out:
        __qede_unlock(edev);
        return rc;
}

void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return;

        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (!vlan->configured)
                        continue;

                vlan->configured = false;
                /* vlan0 filter doesn't consume our quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans++;
                        edev->configured_vlans--;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "marked vlan %d as non-configured\n", vlan->vid);
        }

        edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
                                     struct qede_reload_args *args)
{
        edev->ndev->features = args->u.features;
}

netdev_features_t qede_fix_features(struct net_device *dev,
                                    netdev_features_t features)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
            !(features & NETIF_F_GRO))
                features &= ~NETIF_F_GRO_HW;

        return features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
        bool need_reload = false;

        if (changes & NETIF_F_GRO_HW)
                need_reload = true;

        if (need_reload) {
                struct qede_reload_args args;

                args.u.features = features;
                args.func = &qede_set_features_reload;

                /* Make sure that we definitely need to reload.
                 * In case of an eBPF attached program, there will be no FW
                 * aggregations, so no need to actually reload.
                 */
                __qede_lock(edev);
                if (edev->xdp_prog)
                        args.func(edev, &args);
                else
                        qede_reload(edev, &args, true);
                __qede_unlock(edev);

                return 1;
        }

        return 0;
}

void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_tunn_params tunn_params;
        u16 t_port = ntohs(ti->port);
        int rc;

        memset(&tunn_params, 0, sizeof(tunn_params));

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (!edev->dev_info.common.vxlan_enable)
                        return;

                if (edev->vxlan_dst_port)
                        return;

                tunn_params.update_vxlan_port = 1;
                tunn_params.vxlan_port = t_port;

                __qede_lock(edev);
                rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                if (!rc) {
                        edev->vxlan_dst_port = t_port;
                        DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
                                   t_port);
                } else {
                        DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
                                  t_port);
                }

                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (!edev->dev_info.common.geneve_enable)
                        return;

                if (edev->geneve_dst_port)
                        return;

                tunn_params.update_geneve_port = 1;
                tunn_params.geneve_port = t_port;

                __qede_lock(edev);
                rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                if (!rc) {
                        edev->geneve_dst_port = t_port;
                        DP_VERBOSE(edev, QED_MSG_DEBUG,
                                   "Added geneve port=%d\n", t_port);
                } else {
                        DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
                                  t_port);
                }

                break;
        default:
                return;
        }
}

void qede_udp_tunnel_del(struct net_device *dev,
                         struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_tunn_params tunn_params;
        u16 t_port = ntohs(ti->port);

        memset(&tunn_params, 0, sizeof(tunn_params));

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (t_port != edev->vxlan_dst_port)
                        return;

                tunn_params.update_vxlan_port = 1;
                tunn_params.vxlan_port = 0;

                __qede_lock(edev);
                edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                edev->vxlan_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
                           t_port);

                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (t_port != edev->geneve_dst_port)
                        return;

                tunn_params.update_geneve_port = 1;
                tunn_params.geneve_port = 0;

                __qede_lock(edev);
                edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                edev->geneve_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
                           t_port);
                break;
        default:
                return;
        }
}

static void qede_xdp_reload_func(struct qede_dev *edev,
                                 struct qede_reload_args *args)
{
        struct bpf_prog *old;

        old = xchg(&edev->xdp_prog, args->u.new_prog);
        if (old)
                bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
        struct qede_reload_args args;

        /* If we're called, there was already a bpf reference increment */
        args.func = &qede_xdp_reload_func;
        args.u.new_prog = prog;
        qede_reload(edev, &args, false);

        return 0;
}

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct qede_dev *edev = netdev_priv(dev);

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return qede_xdp_set(edev, xdp->prog);
        case XDP_QUERY_PROG:
                xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
                return 0;
        default:
                return -EINVAL;
        }
}

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char *mac, int num_macs)
{
        struct qed_filter_params filter_cmd;
        int i;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_MCAST;
        filter_cmd.filter.mcast.type = opcode;
        filter_cmd.filter.mcast.num = num_macs;

        for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
                ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

int qede_set_mac_addr(struct net_device *ndev, void *p)
{
        struct qede_dev *edev = netdev_priv(ndev);
        struct sockaddr *addr = p;
        int rc = 0;

        /* Make sure the state doesn't transition while changing the MAC.
         * Also, all flows accessing the dev_addr field are doing that under
         * this lock.
         */
        __qede_lock(edev);

        if (!is_valid_ether_addr(addr->sa_data)) {
                DP_NOTICE(edev, "The MAC address is not valid\n");
                rc = -EFAULT;
                goto out;
        }

        if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
                DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
                          addr->sa_data);
                rc = -EINVAL;
                goto out;
        }

        if (edev->state == QEDE_STATE_OPEN) {
                /* Remove the previous primary mac */
                rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                           ndev->dev_addr);
                if (rc)
                        goto out;
        }

        ether_addr_copy(ndev->dev_addr, addr->sa_data);
        DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);

        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "The device is currently down\n");
                /* Ask PF to explicitly update a copy in bulletin board */
                if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
                        edev->ops->req_bulletin_update_mac(edev->cdev,
                                                           ndev->dev_addr);
                goto out;
        }

        edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);

        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                   ndev->dev_addr);
out:
        __qede_unlock(edev);
        return rc;
}

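/* The driver programs at most 64 multicast MAC filters (hence the
 * 64 * ETH_ALEN staging buffer below).  With more groups than that, or
 * with IFF_ALLMULTI set, it falls back to multicast-promiscuous mode
 * instead of programming individual filters.
 */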
static int
qede_configure_mcast_filtering(struct net_device *ndev,
                               enum qed_filter_rx_mode_type *accept_flags)
{
        struct qede_dev *edev = netdev_priv(ndev);
        unsigned char *mc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc = 0, mc_count;
        size_t size;

        size = 64 * ETH_ALEN;

        mc_macs = kzalloc(size, GFP_KERNEL);
        if (!mc_macs) {
                DP_NOTICE(edev,
                          "Failed to allocate memory for multicast MACs\n");
                rc = -ENOMEM;
                goto exit;
        }

        temp = mc_macs;

        /* Remove all previously configured MAC filters */
        rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                   mc_macs, 1);
        if (rc)
                goto exit;

        netif_addr_lock_bh(ndev);

        mc_count = netdev_mc_count(ndev);
        if (mc_count < 64) {
                netdev_for_each_mc_addr(ha, ndev) {
                        ether_addr_copy(temp, ha->addr);
                        temp += ETH_ALEN;
                }
        }

        netif_addr_unlock_bh(ndev);

        /* Check for all multicast @@@TBD resource allocation */
        if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
                if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
                        *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
        } else {
                /* Add all multicast MAC filters */
                rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                           mc_macs, mc_count);
        }

exit:
        kfree(mc_macs);
        return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
        struct qede_dev *edev = netdev_priv(ndev);

        set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
        enum qed_filter_rx_mode_type accept_flags;
        struct qede_dev *edev = netdev_priv(ndev);
        struct qed_filter_params rx_mode;
        unsigned char *uc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc, uc_count;
        size_t size;

        netif_addr_lock_bh(ndev);

        uc_count = netdev_uc_count(ndev);
        size = uc_count * ETH_ALEN;

        uc_macs = kzalloc(size, GFP_ATOMIC);
        if (!uc_macs) {
                DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
                netif_addr_unlock_bh(ndev);
                return;
        }

        temp = uc_macs;
        netdev_for_each_uc_addr(ha, ndev) {
                ether_addr_copy(temp, ha->addr);
                temp += ETH_ALEN;
        }

        netif_addr_unlock_bh(ndev);

        /* Configure the struct for the Rx mode */
        memset(&rx_mode, 0, sizeof(struct qed_filter_params));
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;

        /* Remove all previous unicast secondary macs and multicast macs
         * (configure / leave the primary MAC)
         */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
                                   edev->ndev->dev_addr);
        if (rc)
                goto out;

        /* Check for promiscuous */
        if (ndev->flags & IFF_PROMISC)
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        else
                accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

        /* Configure all filters regardless, in case promisc is rejected */
        if (uc_count < edev->dev_info.num_mac_filters) {
                int i;

                temp = uc_macs;
                for (i = 0; i < uc_count; i++) {
                        rc = qede_set_ucast_rx_mac(edev,
                                                   QED_FILTER_XCAST_TYPE_ADD,
                                                   temp);
                        if (rc)
                                goto out;

                        temp += ETH_ALEN;
                }
        } else {
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        }

        rc = qede_configure_mcast_filtering(ndev, &accept_flags);
        if (rc)
                goto out;

        /* take care of VLAN mode */
        if (ndev->flags & IFF_PROMISC) {
                qede_config_accept_any_vlan(edev, true);
        } else if (!edev->non_configured_vlans) {
                /* It's possible that accept_any_vlan mode is set due to a
                 * previous setting of IFF_PROMISC. If vlan credits are
                 * sufficient, disable accept_any_vlan.
                 */
                qede_config_accept_any_vlan(edev, false);
        }

        rx_mode.filter.accept_flags = accept_flags;
        edev->ops->filter_config(edev->cdev, &rx_mode);
out:
        kfree(uc_macs);
}

static struct qede_arfs_fltr_node *
qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
{
        struct qede_arfs_fltr_node *fltr;

        hlist_for_each_entry(fltr, head, node)
                if (location == fltr->sw_id)
                        return fltr;

        return NULL;
}

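/* ethtool-managed (n-tuple) rules are expected to live in bucket 0 of
 * the aRFS hash table; the rule enumeration below only ever walks
 * QEDE_ARFS_BUCKET_HEAD(edev, 0).
 */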
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
                          u32 *rule_locs)
{
        struct qede_arfs_fltr_node *fltr;
        struct hlist_head *head;
        int cnt = 0, rc = 0;

        info->data = QEDE_RFS_MAX_FLTR;

        __qede_lock(edev);

        if (!edev->arfs) {
                rc = -EPERM;
                goto unlock;
        }

        head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

        hlist_for_each_entry(fltr, head, node) {
                if (cnt == info->rule_cnt) {
                        rc = -EMSGSIZE;
                        goto unlock;
                }

                rule_locs[cnt] = fltr->sw_id;
                cnt++;
        }

        info->rule_cnt = cnt;

unlock:
        __qede_unlock(edev);
        return rc;
}

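/* Translate a stored rule back into an ethtool_rx_flow_spec.  The
 * ring_cookie encodes both the rxq and the VF; fltr->vfid is biased by
 * one (zero means the rule belongs to the PF, see
 * qede_configure_arfs_fltr()).  For example, a rule steering VF 1 to
 * queue 5 would report:
 *
 *	fsp->ring_cookie = 5 | ((u64)2 << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF);
 *
 * while a drop rule reports RX_CLS_FLOW_DISC instead.
 */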
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
{
        struct ethtool_rx_flow_spec *fsp = &cmd->fs;
        struct qede_arfs_fltr_node *fltr = NULL;
        int rc = 0;

        cmd->data = QEDE_RFS_MAX_FLTR;

        __qede_lock(edev);

        if (!edev->arfs) {
                rc = -EPERM;
                goto unlock;
        }

        fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
                                         fsp->location);
        if (!fltr) {
                DP_NOTICE(edev, "Rule not found - location=0x%x\n",
                          fsp->location);
                rc = -EINVAL;
                goto unlock;
        }

        if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
                if (fltr->tuple.ip_proto == IPPROTO_TCP)
                        fsp->flow_type = TCP_V4_FLOW;
                else
                        fsp->flow_type = UDP_V4_FLOW;

                fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
                fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
                fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
                fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
        } else {
                if (fltr->tuple.ip_proto == IPPROTO_TCP)
                        fsp->flow_type = TCP_V6_FLOW;
                else
                        fsp->flow_type = UDP_V6_FLOW;
                fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
                fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
                memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
                       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
                memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
                       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
        }

        fsp->ring_cookie = fltr->rxq_id;

        if (fltr->vfid) {
                fsp->ring_cookie |= ((u64)fltr->vfid) <<
                                        ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
        }

        if (fltr->b_is_drop)
                fsp->ring_cookie = RX_CLS_FLOW_DISC;
unlock:
        __qede_unlock(edev);
        return rc;
}

static int
qede_poll_arfs_filter_config(struct qede_dev *edev,
                             struct qede_arfs_fltr_node *fltr)
{
        int count = QEDE_ARFS_POLL_COUNT;

        while (fltr->used && count) {
                msleep(20);
                count--;
        }

        if (count == 0 || fltr->fw_rc) {
                DP_NOTICE(edev, "Timeout in polling filter config\n");
                qede_dequeue_fltr_and_config_searcher(edev, fltr);
                return -EIO;
        }

        return fltr->fw_rc;
}

static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
{
        int size = ETH_HLEN;

        if (t->eth_proto == htons(ETH_P_IP))
                size += sizeof(struct iphdr);
        else
                size += sizeof(struct ipv6hdr);

        if (t->ip_proto == IPPROTO_TCP)
                size += sizeof(struct tcphdr);
        else
                size += sizeof(struct udphdr);

        return size;
}

static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
                                    struct qede_arfs_tuple *b)
{
        if (a->eth_proto != htons(ETH_P_IP) ||
            b->eth_proto != htons(ETH_P_IP))
                return false;

        return (a->src_ipv4 == b->src_ipv4) &&
               (a->dst_ipv4 == b->dst_ipv4);
}

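/* Build a minimal template packet (Ethernet + IPv4 + L4 ports) in
 * @header; this is the buffer that later backs params.addr in
 * qede_configure_arfs_fltr(), i.e. it gets DMA-mapped and handed to the
 * device so the searcher can extract the match fields from it.
 */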
1509 static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
1510                                      void *header)
1511 {
1512         __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
1513         struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
1514         struct ethhdr *eth = (struct ethhdr *)header;
1515
1516         eth->h_proto = t->eth_proto;
1517         ip->saddr = t->src_ipv4;
1518         ip->daddr = t->dst_ipv4;
1519         ip->version = 0x4;
1520         ip->ihl = 0x5;
1521         ip->protocol = t->ip_proto;
1522         ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
1523
1524         /* ports is weakly typed to suit both TCP and UDP ports */
1525         ports[0] = t->src_port;
1526         ports[1] = t->dst_port;
1527 }

static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
					 void *buffer)
{
	const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";

	snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
		 "%s %pI4 (%04x) -> %pI4 (%04x)",
		 prefix, &t->src_ipv4, t->src_port,
		 &t->dst_ipv4, t->dst_port);
}

static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
				    struct qede_arfs_tuple *b)
{
	if (a->eth_proto != htons(ETH_P_IPV6) ||
	    b->eth_proto != htons(ETH_P_IPV6))
		return false;

	if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
		return false;

	if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
		return false;

	return true;
}

static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
				     void *header)
{
	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
	struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
	struct ethhdr *eth = (struct ethhdr *)header;

	eth->h_proto = t->eth_proto;
	memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
	memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
	ip6->version = 0x6;

	if (t->ip_proto == IPPROTO_TCP) {
		ip6->nexthdr = NEXTHDR_TCP;
		ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
	} else {
		ip6->nexthdr = NEXTHDR_UDP;
		ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
	}

	/* ports is weakly typed to suit both TCP and UDP ports */
	ports[0] = t->src_port;
	ports[1] = t->dst_port;
}
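
/* Note the deliberate asymmetry between the two header builders: IPv4's
 * tot_len covers the IP header plus payload, so it is derived from
 * qede_flow_get_min_header_size() minus ETH_HLEN, while IPv6's
 * payload_len excludes the fixed 40-byte IPv6 header and therefore only
 * counts the L4 header that follows it.
 */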

/* Validate fields which are set and not accepted by the driver */
static int qede_flow_spec_validate_unused(struct qede_dev *edev,
					  struct ethtool_rx_flow_spec *fs)
{
	if (fs->flow_type & FLOW_MAC_EXT) {
		DP_INFO(edev, "Don't support MAC extensions\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
		DP_INFO(edev, "Don't support vlan-based classification\n");
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] || fs->h_ext.data[1])) {
		DP_INFO(edev, "Don't support user defined data\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
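
/* For instance (an illustrative command, not taken from the driver's
 * documentation), a rule that classifies on a VLAN tag would be rejected
 * by the check above:
 *
 *   # ethtool -N eth0 flow-type tcp4 dst-port 80 vlan 100 action 2
 *
 * because the vlan keyword sets FLOW_EXT with a non-zero h_ext.vlan_tci,
 * whereas the same rule without "vlan 100" passes this validation.
 */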

static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t)
{
	/* The input must describe exactly one supported profile: a full
	 * 4-tuple, an L4 destination port alone, a source IP alone, or a
	 * destination IP alone.
	 */
	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !t->src_ipv4 && !t->dst_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !t->dst_ipv4 && t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   t->dst_ipv4 && !t->src_ipv4) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv4_cmp;
	t->build_hdr = qede_flow_build_ipv4_hdr;
	t->stringify = qede_flow_stringify_ipv4_hdr;

	return 0;
}
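
/* Summarizing the classification above as a table (fields marked "-"
 * must be zero/unset for that mode to be chosen):
 *
 *   src port | dst port | src IP | dst IP | resulting mode
 *   ---------+----------+--------+--------+---------------------------
 *      set   |   set    |  set   |  set   | ..._MODE_5_TUPLE
 *       -    |   set    |   -    |   -    | ..._MODE_L4_PORT
 *       -    |    -     |  set   |   -    | ..._MODE_IP_SRC
 *       -    |    -     |   -    |  set   | ..._MODE_IP_DEST
 *
 * Any other combination is rejected with -EOPNOTSUPP.
 */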

static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
					struct qede_arfs_tuple *t,
					struct in6_addr *zaddr)
{
	/* Same profile rules as the IPv4 variant: a full 4-tuple, an L4
	 * destination port alone, a source IP alone, or a destination IP
	 * alone, with "unset" addresses compared against *zaddr.
	 */
	if (t->src_port && t->dst_port &&
	    memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
	    memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
	} else if (!t->src_port && t->dst_port &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
	} else if (!t->src_port && !t->dst_port &&
		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
	} else if (!t->src_port && !t->dst_port &&
		   memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
	} else {
		DP_INFO(edev, "Invalid N-tuple\n");
		return -EOPNOTSUPP;
	}

	t->ip_comp = qede_flow_spec_ipv6_cmp;
	t->build_hdr = qede_flow_build_ipv6_hdr;

	return 0;
}

static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
					       struct qede_arfs_tuple *t,
					       struct ethtool_rx_flow_spec *fs)
{
	if ((fs->h_u.tcp_ip4_spec.ip4src &
	     fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) {
		DP_INFO(edev, "Don't support IP-masks\n");
		return -EOPNOTSUPP;
	}

	if ((fs->h_u.tcp_ip4_spec.ip4dst &
	     fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) {
		DP_INFO(edev, "Don't support IP-masks\n");
		return -EOPNOTSUPP;
	}

	if ((fs->h_u.tcp_ip4_spec.psrc &
	     fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) {
		DP_INFO(edev, "Don't support port-masks\n");
		return -EOPNOTSUPP;
	}

	if ((fs->h_u.tcp_ip4_spec.pdst &
	     fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) {
		DP_INFO(edev, "Don't support port-masks\n");
		return -EOPNOTSUPP;
	}

	if (fs->h_u.tcp_ip4_spec.tos) {
		DP_INFO(edev, "Don't support tos\n");
		return -EOPNOTSUPP;
	}

	t->eth_proto = htons(ETH_P_IP);
	t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src;
	t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst;
	t->src_port = fs->h_u.tcp_ip4_spec.psrc;
	t->dst_port = fs->h_u.tcp_ip4_spec.pdst;

	return qede_set_v4_tuple_to_profile(edev, t);
}
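
/* The "(value & mask) != value" checks above reject any rule whose mask
 * clears a bit that is set in the value, e.g. 192.168.0.1/24.  The
 * driver only supports exact matches, so a field is expected either as
 * a full match (such as 192.168.0.1/32) or left at zero, in which case
 * the profile-selection code treats it as unused.
 */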

static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
					 struct qede_arfs_tuple *t,
					 struct ethtool_rx_flow_spec *fs)
{
	t->ip_proto = IPPROTO_TCP;

	if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
		return -EINVAL;

	return 0;
}

static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev,
					 struct qede_arfs_tuple *t,
					 struct ethtool_rx_flow_spec *fs)
{
	t->ip_proto = IPPROTO_UDP;

	if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
		return -EINVAL;

	return 0;
}

static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
					       struct qede_arfs_tuple *t,
					       struct ethtool_rx_flow_spec *fs)
{
	struct in6_addr zero_addr;

	memset(&zero_addr, 0, sizeof(zero_addr));

	if ((fs->h_u.tcp_ip6_spec.psrc &
	     fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
		DP_INFO(edev, "Don't support port-masks\n");
		return -EOPNOTSUPP;
	}

	if ((fs->h_u.tcp_ip6_spec.pdst &
	     fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) {
		DP_INFO(edev, "Don't support port-masks\n");
		return -EOPNOTSUPP;
	}

	if (fs->h_u.tcp_ip6_spec.tclass) {
		DP_INFO(edev, "Don't support tclass\n");
		return -EOPNOTSUPP;
	}

	t->eth_proto = htons(ETH_P_IPV6);
	memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src,
	       sizeof(struct in6_addr));
	memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst,
	       sizeof(struct in6_addr));
	t->src_port = fs->h_u.tcp_ip6_spec.psrc;
	t->dst_port = fs->h_u.tcp_ip6_spec.pdst;

	return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}

static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
					 struct qede_arfs_tuple *t,
					 struct ethtool_rx_flow_spec *fs)
{
	t->ip_proto = IPPROTO_TCP;

	if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
		return -EINVAL;

	return 0;
}

static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev,
					 struct qede_arfs_tuple *t,
					 struct ethtool_rx_flow_spec *fs)
{
	t->ip_proto = IPPROTO_UDP;

	if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
		return -EINVAL;

	return 0;
}

static int qede_flow_spec_to_tuple(struct qede_dev *edev,
				   struct qede_arfs_tuple *t,
				   struct ethtool_rx_flow_spec *fs)
{
	memset(t, 0, sizeof(*t));

	if (qede_flow_spec_validate_unused(edev, fs))
		return -EOPNOTSUPP;

	switch ((fs->flow_type & ~FLOW_EXT)) {
	case TCP_V4_FLOW:
		return qede_flow_spec_to_tuple_tcpv4(edev, t, fs);
	case UDP_V4_FLOW:
		return qede_flow_spec_to_tuple_udpv4(edev, t, fs);
	case TCP_V6_FLOW:
		return qede_flow_spec_to_tuple_tcpv6(edev, t, fs);
	case UDP_V6_FLOW:
		return qede_flow_spec_to_tuple_udpv6(edev, t, fs);
	default:
		DP_VERBOSE(edev, NETIF_MSG_IFUP,
			   "Can't support flow of type %08x\n", fs->flow_type);
		return -EOPNOTSUPP;
	}

	return 0;
}

static int qede_flow_spec_validate(struct qede_dev *edev,
				   struct ethtool_rx_flow_spec *fs,
				   struct qede_arfs_tuple *t)
{
	if (fs->location >= QEDE_RFS_MAX_FLTR) {
		DP_INFO(edev, "Location out-of-bounds\n");
		return -EINVAL;
	}

	/* Check location isn't already in use */
	if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) {
		DP_INFO(edev, "Location already in use\n");
		return -EINVAL;
	}

	/* Check if the filtering-mode could support the filter */
	if (edev->arfs->filter_count &&
	    edev->arfs->mode != t->mode) {
		DP_INFO(edev,
			"flow_spec would require filtering mode %08x, but %08x is configured\n",
			t->mode, edev->arfs->mode);
		return -EINVAL;
	}

	/* If drop requested then no need to validate other data */
	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
		return 0;

	if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
		return 0;

	if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
		DP_INFO(edev, "Queue out-of-bounds\n");
		return -EINVAL;
	}

	return 0;
}

/* Must be called while qede lock is held */
static struct qede_arfs_fltr_node *
qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
{
	struct qede_arfs_fltr_node *fltr;
	struct hlist_node *temp;
	struct hlist_head *head;

	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

	hlist_for_each_entry_safe(fltr, temp, head, node) {
		if (fltr->tuple.ip_proto == t->ip_proto &&
		    fltr->tuple.src_port == t->src_port &&
		    fltr->tuple.dst_port == t->dst_port &&
		    t->ip_comp(&fltr->tuple, t))
			return fltr;
	}

	return NULL;
}

static void qede_flow_set_destination(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *n,
				      struct ethtool_rx_flow_spec *fs)
{
	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		n->b_is_drop = true;
		return;
	}

	n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
	n->next_rxq_id = n->rxq_id;

	if (n->vfid)
		DP_VERBOSE(edev, QED_MSG_SP,
			   "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
}
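
/* The ring_cookie decoded above packs two destinations into one u64: the
 * low 32 bits select an RX queue and the field starting at
 * ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF selects a VF.  Judging by the
 * "n->vfid - 1" in the message above, the VF field is stored biased by
 * one so that zero can mean "deliver to the PF"; the reporting path at
 * the top of this section shifts fltr->vfid back into the cookie
 * unmodified for the same reason.
 */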

int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec *fsp = &info->fs;
	struct qede_arfs_fltr_node *n;
	struct qede_arfs_tuple t;
	int min_hlen, rc;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	/* Translate the flow specification into something fitting our DB */
	rc = qede_flow_spec_to_tuple(edev, &t, fsp);
	if (rc)
		goto unlock;

	/* Make sure location is valid and filter isn't already set */
	rc = qede_flow_spec_validate(edev, fsp, &t);
	if (rc)
		goto unlock;

	if (qede_flow_find_fltr(edev, &t)) {
		rc = -EINVAL;
		goto unlock;
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	min_hlen = qede_flow_get_min_header_size(&t);
	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	n->sw_id = fsp->location;
	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
	n->buf_len = min_hlen;

	memcpy(&n->tuple, &t, sizeof(n->tuple));

	qede_flow_set_destination(edev, n, fsp);

	/* Build a minimal header according to the flow */
	n->tuple.build_hdr(&n->tuple, n->data);

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);
unlock:
	__qede_unlock(edev);

	return rc;
}
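
/* A representative invocation that exercises the path above (device name
 * and queue assumed for illustration), steering all TCP port-80 traffic
 * to RX queue 2 at rule location 0:
 *
 *   # ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 0
 *
 * This selects the L4_PORT profile.  Supplying src-ip, dst-ip and
 * src-port as well would select the 5_TUPLE profile instead; other
 * partial combinations are rejected by qede_set_v4_tuple_to_profile().
 */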

int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
	struct qede_arfs_fltr_node *fltr = NULL;
	int rc = -EPERM;

	__qede_lock(edev);
	if (!edev->arfs)
		goto unlock;

	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
					 cookie);
	if (!fltr)
		goto unlock;

	qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);

	rc = qede_poll_arfs_filter_config(edev, fltr);
	if (rc == 0)
		qede_dequeue_fltr_and_config_searcher(edev, fltr);

unlock:
	__qede_unlock(edev);
	return rc;
}
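
/* The matching teardown from userspace (again illustrative) removes the
 * rule by the same location cookie it was installed with:
 *
 *   # ethtool -N eth0 delete 0
 */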

int qede_get_arfs_filter_count(struct qede_dev *edev)
{
	int count = 0;

	__qede_lock(edev);

	if (!edev->arfs)
		goto unlock;

	count = edev->arfs->filter_count;

unlock:
	__qede_unlock(edev);
	return count;
}

static int qede_parse_actions(struct qede_dev *edev,
			      struct tcf_exts *exts)
{
	int rc = -EINVAL, num_act = 0, i;
	const struct tc_action *a;
	bool is_drop = false;

	if (!tcf_exts_has_actions(exts)) {
		DP_NOTICE(edev, "No tc actions received\n");
		return rc;
	}

	tcf_exts_for_each_action(i, a, exts) {
		num_act++;

		if (is_tcf_gact_shot(a))
			is_drop = true;
	}

	if (num_act == 1 && is_drop)
		return 0;

	return rc;
}
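
/* qede_parse_actions() accepts exactly one action and only the generic
 * "drop" (gact shot); redirects, mirred or multi-action chains all fall
 * through to -EINVAL.  In tc terms the only accepted shape is along the
 * lines of (an illustrative command, interface name assumed, with an
 * ingress qdisc already in place):
 *
 *   # tc filter add dev eth0 ingress protocol ip flower \
 *         ip_proto tcp dst_port 80 action drop
 */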

static int
qede_tc_parse_ports(struct qede_dev *edev,
		    struct tc_cls_flower_offload *f,
		    struct qede_arfs_tuple *t)
{
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key, *mask;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_PORTS,
						f->key);
		mask = skb_flow_dissector_target(f->dissector,
						 FLOW_DISSECTOR_KEY_PORTS,
						 f->mask);

		if ((key->src && mask->src != U16_MAX) ||
		    (key->dst && mask->dst != U16_MAX)) {
			DP_NOTICE(edev, "Do not support port masks\n");
			return -EINVAL;
		}

		t->src_port = key->src;
		t->dst_port = key->dst;
	}

	return 0;
}

static int
qede_tc_parse_v6_common(struct qede_dev *edev,
			struct tc_cls_flower_offload *f,
			struct qede_arfs_tuple *t)
{
	struct in6_addr zero_addr, addr;

	memset(&zero_addr, 0, sizeof(zero_addr));
	memset(&addr, 0xff, sizeof(addr));

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key, *mask;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						f->key);
		mask = skb_flow_dissector_target(f->dissector,
						 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						 f->mask);

		if ((memcmp(&key->src, &zero_addr, sizeof(addr)) &&
		     memcmp(&mask->src, &addr, sizeof(addr))) ||
		    (memcmp(&key->dst, &zero_addr, sizeof(addr)) &&
		     memcmp(&mask->dst, &addr, sizeof(addr)))) {
			DP_NOTICE(edev,
				  "Do not support IPv6 address prefix/mask\n");
			return -EINVAL;
		}

		memcpy(&t->src_ipv6, &key->src, sizeof(addr));
		memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
	}

	if (qede_tc_parse_ports(edev, f, t))
		return -EINVAL;

	return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}

static int
qede_tc_parse_v4_common(struct qede_dev *edev,
			struct tc_cls_flower_offload *f,
			struct qede_arfs_tuple *t)
{
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key, *mask;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						f->key);
		mask = skb_flow_dissector_target(f->dissector,
						 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						 f->mask);

		if ((key->src && mask->src != U32_MAX) ||
		    (key->dst && mask->dst != U32_MAX)) {
			DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
			return -EINVAL;
		}

		t->src_ipv4 = key->src;
		t->dst_ipv4 = key->dst;
	}

	if (qede_tc_parse_ports(edev, f, t))
		return -EINVAL;

	return qede_set_v4_tuple_to_profile(edev, t);
}

static int
qede_tc_parse_tcp_v6(struct qede_dev *edev,
		     struct tc_cls_flower_offload *f,
		     struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_TCP;
	tuple->eth_proto = htons(ETH_P_IPV6);

	return qede_tc_parse_v6_common(edev, f, tuple);
}

static int
qede_tc_parse_tcp_v4(struct qede_dev *edev,
		     struct tc_cls_flower_offload *f,
		     struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_TCP;
	tuple->eth_proto = htons(ETH_P_IP);

	return qede_tc_parse_v4_common(edev, f, tuple);
}

static int
qede_tc_parse_udp_v6(struct qede_dev *edev,
		     struct tc_cls_flower_offload *f,
		     struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_UDP;
	tuple->eth_proto = htons(ETH_P_IPV6);

	return qede_tc_parse_v6_common(edev, f, tuple);
}

static int
qede_tc_parse_udp_v4(struct qede_dev *edev,
		     struct tc_cls_flower_offload *f,
		     struct qede_arfs_tuple *tuple)
{
	tuple->ip_proto = IPPROTO_UDP;
	tuple->eth_proto = htons(ETH_P_IP);

	return qede_tc_parse_v4_common(edev, f, tuple);
}

static int
qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
		       struct tc_cls_flower_offload *f,
		       struct qede_arfs_tuple *tuple)
{
	int rc = -EINVAL;
	u8 ip_proto = 0;

	memset(tuple, 0, sizeof(*tuple));

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		DP_NOTICE(edev, "Unsupported key set:0x%x\n",
			  f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (proto != htons(ETH_P_IP) &&
	    proto != htons(ETH_P_IPV6)) {
		DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
		return -EPROTONOSUPPORT;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_BASIC,
						f->key);
		ip_proto = key->ip_proto;
	}

	if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
		rc = qede_tc_parse_tcp_v4(edev, f, tuple);
	else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
		rc = qede_tc_parse_tcp_v6(edev, f, tuple);
	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
		rc = qede_tc_parse_udp_v4(edev, f, tuple);
	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
		rc = qede_tc_parse_udp_v6(edev, f, tuple);
	else
		DP_NOTICE(edev, "Invalid tc protocol request\n");

	return rc;
}

int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
			    struct tc_cls_flower_offload *f)
{
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc = -EINVAL;
	struct qede_arfs_tuple t;

	__qede_lock(edev);

	if (!edev->arfs) {
		rc = -EPERM;
		goto unlock;
	}

	/* parse flower attribute and prepare filter */
	if (qede_parse_flower_attr(edev, proto, f, &t))
		goto unlock;

	/* Validate profile mode and number of filters */
	if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
	    edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
		DP_NOTICE(edev,
			  "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
			  t.mode, edev->arfs->mode, edev->arfs->filter_count);
		goto unlock;
	}

	/* parse tc actions and get the vf_id */
	if (qede_parse_actions(edev, f->exts))
		goto unlock;

	if (qede_flow_find_fltr(edev, &t)) {
		rc = -EEXIST;
		goto unlock;
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		rc = -ENOMEM;
		goto unlock;
	}

	min_hlen = qede_flow_get_min_header_size(&t);

	n->data = kzalloc(min_hlen, GFP_KERNEL);
	if (!n->data) {
		kfree(n);
		rc = -ENOMEM;
		goto unlock;
	}

	memcpy(&n->tuple, &t, sizeof(n->tuple));

	n->buf_len = min_hlen;
	n->b_is_drop = true;
	n->sw_id = f->cookie;

	n->tuple.build_hdr(&n->tuple, n->data);

	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
	if (rc)
		goto unlock;

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
	rc = qede_poll_arfs_filter_config(edev, n);

unlock:
	__qede_unlock(edev);
	return rc;
}
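
/* Because qede_parse_actions() only admits a lone drop action, every
 * filter installed through this flower path is created with
 * n->b_is_drop = true; queue-steering destinations are reachable only
 * via the ethtool n-tuple path above.
 */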