Linux-libre 5.4.49-gnu: drivers/net/ethernet/netronome/nfp/flower/match.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

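/* Compile the metadata and VLAN TCI portion of the match key and mask.
 * The key layer bitmap is always written and the mask id defaulted; the
 * TCI word is only filled in when the rule matches on VLAN fields.
 */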
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
                            struct nfp_flower_meta_tci *msk,
                            struct flow_cls_offload *flow, u8 key_type)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        u16 tmp_tci;

        memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
        memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

        /* Populate the metadata frame. */
        ext->nfp_flow_key_layer = key_type;
        ext->mask_id = ~0;

        msk->nfp_flow_key_layer = key_type;
        msk->mask_id = ~0;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;

                flow_rule_match_vlan(rule, &match);
                /* Populate the tci field. */
                tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
                tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                      match.key->vlan_priority) |
                           FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                      match.key->vlan_id);
                ext->tci = cpu_to_be16(tmp_tci);

                tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
                tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                      match.mask->vlan_priority) |
                           FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                      match.mask->vlan_id);
                msk->tci = cpu_to_be16(tmp_tci);
        }
}

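/* Write the second-level key layer bitmap into the extended metadata frame. */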
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
        frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

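/* Compile the ingress port of the match. The mask version is always an
 * exact match; tunnel matches encode the tunnel type in the port field,
 * while all other matches use the cmsg port id, which must be non-zero.
 */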
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
                        bool mask_version, enum nfp_flower_tun_type tun_type,
                        struct netlink_ext_ack *extack)
{
        if (mask_version) {
                frame->in_port = cpu_to_be32(~0);
                return 0;
        }

        if (tun_type) {
                frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        } else {
                if (!cmsg_port) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
                        return -EOPNOTSUPP;
                }
                frame->in_port = cpu_to_be32(cmsg_port);
        }

        return 0;
}

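/* Compile the MAC address and MPLS portion of the key and mask. A rule
 * that matches on an MPLS ether type but carries no MPLS fields sets only
 * the NFP_FLOWER_MASK_MPLS_Q bit.
 */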
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
                       struct nfp_flower_mac_mpls *msk,
                       struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

        memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
        memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_match_eth_addrs match;

                flow_rule_match_eth_addrs(rule, &match);
                /* Populate mac frame. */
                ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
                ether_addr_copy(ext->mac_src, &match.key->src[0]);
                ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
                ether_addr_copy(msk->mac_src, &match.mask->src[0]);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
                struct flow_match_mpls match;
                u32 t_mpls;

                flow_rule_match_mpls(rule, &match);
                t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
                         NFP_FLOWER_MASK_MPLS_Q;
                ext->mpls_lse = cpu_to_be32(t_mpls);
                t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
                         NFP_FLOWER_MASK_MPLS_Q;
                msk->mpls_lse = cpu_to_be32(t_mpls);
        } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                /* Check for an mpls ether type and set the
                 * NFP_FLOWER_MASK_MPLS_Q bit, which indicates an mpls ether
                 * type with no mpls fields present.
                 */
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
                    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
                        ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
                        msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
                }
        }
}

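/* Compile the L4 source and destination port portion of the key and mask. */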
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
                         struct nfp_flower_tp_ports *msk,
                         struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

        memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
        memset(msk, 0, sizeof(struct nfp_flower_tp_ports));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                ext->port_src = match.key->src;
                ext->port_dst = match.key->dst;
                msk->port_src = match.mask->src;
                msk->port_dst = match.mask->dst;
        }
}

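/* Compile the IP fields shared by the IPv4 and IPv6 matches: protocol,
 * TOS, TTL, TCP flags and fragmentation state. The caller is expected to
 * have zeroed the frames beforehand.
 */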
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
                          struct nfp_flower_ip_ext *msk,
                          struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ext->proto = match.key->ip_proto;
                msk->proto = match.mask->ip_proto;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_match_ip match;

                flow_rule_match_ip(rule, &match);
                ext->tos = match.key->tos;
                ext->ttl = match.key->ttl;
                msk->tos = match.mask->tos;
                msk->ttl = match.mask->ttl;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
                u16 tcp_flags, tcp_flags_mask;
                struct flow_match_tcp match;

                flow_rule_match_tcp(rule, &match);
                tcp_flags = be16_to_cpu(match.key->flags);
                tcp_flags_mask = be16_to_cpu(match.mask->flags);

                if (tcp_flags & TCPHDR_FIN)
                        ext->flags |= NFP_FL_TCP_FLAG_FIN;
                if (tcp_flags_mask & TCPHDR_FIN)
                        msk->flags |= NFP_FL_TCP_FLAG_FIN;

                if (tcp_flags & TCPHDR_SYN)
                        ext->flags |= NFP_FL_TCP_FLAG_SYN;
                if (tcp_flags_mask & TCPHDR_SYN)
                        msk->flags |= NFP_FL_TCP_FLAG_SYN;

                if (tcp_flags & TCPHDR_RST)
                        ext->flags |= NFP_FL_TCP_FLAG_RST;
                if (tcp_flags_mask & TCPHDR_RST)
                        msk->flags |= NFP_FL_TCP_FLAG_RST;

                if (tcp_flags & TCPHDR_PSH)
                        ext->flags |= NFP_FL_TCP_FLAG_PSH;
                if (tcp_flags_mask & TCPHDR_PSH)
                        msk->flags |= NFP_FL_TCP_FLAG_PSH;

                if (tcp_flags & TCPHDR_URG)
                        ext->flags |= NFP_FL_TCP_FLAG_URG;
                if (tcp_flags_mask & TCPHDR_URG)
                        msk->flags |= NFP_FL_TCP_FLAG_URG;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
                        ext->flags |= NFP_FL_IP_FRAGMENTED;
                if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
                        msk->flags |= NFP_FL_IP_FRAGMENTED;
                if (match.key->flags & FLOW_DIS_FIRST_FRAG)
                        ext->flags |= NFP_FL_IP_FRAG_FIRST;
                if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
                        msk->flags |= NFP_FL_IP_FRAG_FIRST;
        }
}

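/* Compile the IPv4 address portion of the key and mask, followed by the
 * shared IP extension fields.
 */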
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
                        struct nfp_flower_ipv4 *msk,
                        struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        struct flow_match_ipv4_addrs match;

        memset(ext, 0, sizeof(struct nfp_flower_ipv4));
        memset(msk, 0, sizeof(struct nfp_flower_ipv4));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                flow_rule_match_ipv4_addrs(rule, &match);
                ext->ipv4_src = match.key->src;
                ext->ipv4_dst = match.key->dst;
                msk->ipv4_src = match.mask->src;
                msk->ipv4_dst = match.mask->dst;
        }

        nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

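/* Compile the IPv6 address portion of the key and mask, followed by the
 * shared IP extension fields.
 */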
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
                        struct nfp_flower_ipv6 *msk,
                        struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

        memset(ext, 0, sizeof(struct nfp_flower_ipv6));
        memset(msk, 0, sizeof(struct nfp_flower_ipv6));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                ext->ipv6_src = match.key->src;
                ext->ipv6_dst = match.key->dst;
                msk->ipv6_src = match.mask->src;
                msk->ipv6_dst = match.mask->dst;
        }

        nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

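/* Copy the raw geneve option TLV data of the key and mask into the match. */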
static int
nfp_flower_compile_geneve_opt(void *ext, void *msk,
                              struct flow_cls_offload *flow)
{
        struct flow_match_enc_opts match;

        flow_rule_match_enc_opts(flow->rule, &match);
        memcpy(ext, match.key->data, match.key->len);
        memcpy(msk, match.mask->data, match.mask->len);

        return 0;
}

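/* Compile the outer IPv4 source and destination of a tunnel match. */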
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
                                  struct nfp_flower_tun_ipv4 *msk,
                                  struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_enc_ipv4_addrs(rule, &match);
                ext->src = match.key->src;
                ext->dst = match.key->dst;
                msk->src = match.mask->src;
                msk->dst = match.mask->dst;
        }
}

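/* Compile the outer IP TOS and TTL of a tunnel match. */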
static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
                              struct nfp_flower_tun_ip_ext *msk,
                              struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
                struct flow_match_ip match;

                flow_rule_match_enc_ip(rule, &match);
                ext->tos = match.key->tos;
                ext->ttl = match.key->ttl;
                msk->tos = match.mask->tos;
                msk->ttl = match.mask->ttl;
        }
}

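/* Compile an IPv4 GRE tunnel match. NVGRE is the only supported GRE
 * tunnel type, so the protocol is pinned to ETH_P_TEB with an exact-match
 * mask; the GRE key is carried as the tunnel key when matched on.
 */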
static void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
                                struct nfp_flower_ipv4_gre_tun *msk,
                                struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

        memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
        memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));

        /* NVGRE is the only supported GRE tunnel type. */
        ext->ethertype = cpu_to_be16(ETH_P_TEB);
        msk->ethertype = cpu_to_be16(~0);

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;

                flow_rule_match_enc_keyid(rule, &match);
                ext->tun_key = match.key->keyid;
                msk->tun_key = match.mask->keyid;

                ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
                msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
        }

        nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

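/* Compile an IPv4 UDP tunnel match (VXLAN or geneve). The 24-bit VNI is
 * shifted into position within the 32-bit tunnel id field.
 */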
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
                                struct nfp_flower_ipv4_udp_tun *msk,
                                struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

        memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
        memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;
                u32 temp_vni;

                flow_rule_match_enc_keyid(rule, &match);
                temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
                ext->tun_id = cpu_to_be32(temp_vni);
                temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
                msk->tun_id = cpu_to_be32(temp_vni);
        }

        nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

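/* Compile a TC flower rule into the unmasked and mask data buffers of the
 * NFP flow entry, walking the key layers in the order the firmware
 * expects. Tunnel matches additionally record and offload the tunnel
 * destination address. Returns 0 on success or a negative errno.
 */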
int nfp_flower_compile_flow_match(struct nfp_app *app,
                                  struct flow_cls_offload *flow,
                                  struct nfp_fl_key_ls *key_ls,
                                  struct net_device *netdev,
                                  struct nfp_fl_payload *nfp_flow,
                                  enum nfp_flower_tun_type tun_type,
                                  struct netlink_ext_ack *extack)
{
        u32 port_id;
        int err;
        u8 *ext;
        u8 *msk;

        port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);

        ext = nfp_flow->unmasked_data;
        msk = nfp_flow->mask_data;

        nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
                                    (struct nfp_flower_meta_tci *)msk,
                                    flow, key_ls->key_layer);
        ext += sizeof(struct nfp_flower_meta_tci);
        msk += sizeof(struct nfp_flower_meta_tci);

        /* Populate extended metadata if required. */
        if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
                                            key_ls->key_layer_two);
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
                                            key_ls->key_layer_two);
                ext += sizeof(struct nfp_flower_ext_meta);
                msk += sizeof(struct nfp_flower_ext_meta);
        }

        /* Populate exact port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
                                      port_id, false, tun_type, extack);
        if (err)
                return err;

        /* Populate mask port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
                                      port_id, true, tun_type, extack);
        if (err)
                return err;

        ext += sizeof(struct nfp_flower_in_port);
        msk += sizeof(struct nfp_flower_in_port);

        if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
                                       (struct nfp_flower_mac_mpls *)msk,
                                       flow);
                ext += sizeof(struct nfp_flower_mac_mpls);
                msk += sizeof(struct nfp_flower_mac_mpls);
        }

        if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
                                         (struct nfp_flower_tp_ports *)msk,
                                         flow);
                ext += sizeof(struct nfp_flower_tp_ports);
                msk += sizeof(struct nfp_flower_tp_ports);
        }

        if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
                                        (struct nfp_flower_ipv4 *)msk,
                                        flow);
                ext += sizeof(struct nfp_flower_ipv4);
                msk += sizeof(struct nfp_flower_ipv4);
        }

        if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
                                        (struct nfp_flower_ipv6 *)msk,
                                        flow);
                ext += sizeof(struct nfp_flower_ipv6);
                msk += sizeof(struct nfp_flower_ipv6);
        }

        if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
                __be32 tun_dst;

                nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
                tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
                ext += sizeof(struct nfp_flower_ipv4_gre_tun);
                msk += sizeof(struct nfp_flower_ipv4_gre_tun);

                /* Store the tunnel destination in the rule data.
                 * This must be present and be an exact match.
                 */
                nfp_flow->nfp_tun_ipv4_addr = tun_dst;
                nfp_tunnel_add_ipv4_off(app, tun_dst);
        }

        if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
            key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
                __be32 tun_dst;

                nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
                tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
                ext += sizeof(struct nfp_flower_ipv4_udp_tun);
                msk += sizeof(struct nfp_flower_ipv4_udp_tun);

                /* Store the tunnel destination in the rule data.
                 * This must be present and be an exact match.
                 */
                nfp_flow->nfp_tun_ipv4_addr = tun_dst;
                nfp_tunnel_add_ipv4_off(app, tun_dst);

                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
                        err = nfp_flower_compile_geneve_opt(ext, msk, flow);
                        if (err)
                                return err;
                }
        }

        return 0;
}