// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

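/* Each match section below is compiled twice into two parallel blobs of
 * identical layout: "ext" holds the unmasked (exact) key values and "msk"
 * the corresponding mask, which the firmware pairs up when classifying.
 * As an illustration (hypothetical interface name), a rule such as
 *
 *   tc filter add dev nfp_p0 ingress protocol ip flower \
 *           src_ip 10.0.0.1/32 ip_proto tcp action drop
 *
 * flows through nfp_flower_compile_flow_match() at the bottom of this file.
 */
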
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
			    struct nfp_flower_meta_tci *msk,
			    struct flow_cls_offload *flow, u8 key_type)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	u16 tmp_tci;

	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

	/* Populate the metadata frame. */
	ext->nfp_flow_key_layer = key_type;
	ext->mask_id = ~0;

	msk->nfp_flow_key_layer = key_type;
	msk->mask_id = ~0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		/* Populate the tci field. */
		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.key->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.key->vlan_id);
		ext->tci = cpu_to_be16(tmp_tci);

		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.mask->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.mask->vlan_id);
		msk->tci = cpu_to_be16(tmp_tci);
	}
}

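/* A worked example of the TCI packing above, assuming the standard 802.1Q
 * bit positions for the NFP_FLOWER_MASK_VLAN_* fields (PRIO in bits 15:13,
 * VID in bits 11:0, PRESENT in the former CFI/DEI bit): vlan_priority 5
 * and vlan_id 100 become FIELD_PREP(PRIO, 5) | PRESENT | FIELD_PREP(VID,
 * 100), byte-swapped to wire order by cpu_to_be16().
 */
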
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type,
			struct netlink_ext_ack *extack)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		if (!cmsg_port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
			return -EOPNOTSUPP;
		}
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}

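/* Note that the mask half of the in_port word is always ~0: the ingress
 * port (or tunnel type) must match exactly, so no mask is derived from
 * the rule itself.
 */
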
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk,
		       struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		/* Populate mac frame. */
		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
		ether_addr_copy(ext->mac_src, &match.key->src[0]);
		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 t_mpls;

		flow_rule_match_mpls(rule, &match);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		ext->mpls_lse = cpu_to_be32(t_mpls);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		msk->mpls_lse = cpu_to_be32(t_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
		 * bit, which indicates an mpls ether type but without any
		 * mpls fields.
		 */
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}
}

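/* The LSE packing above assumes the RFC 3032 label-stack-entry layout for
 * the NFP_FLOWER_MASK_MPLS_* fields (20-bit label, 3-bit traffic class,
 * bottom-of-stack bit), with NFP_FLOWER_MASK_MPLS_Q acting as an "MPLS
 * present" flag, set on its own in the else-branch when only the MPLS
 * ethertype, and no LSE field, is matched.
 */
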
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
			 struct nfp_flower_tp_ports *msk,
			 struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		ext->port_src = match.key->src;
		ext->port_dst = match.key->dst;
		msk->port_src = match.mask->src;
		msk->port_dst = match.mask->dst;
	}
}

static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
			  struct nfp_flower_ip_ext *msk,
			  struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ext->proto = match.key->ip_proto;
		msk->proto = match.mask->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		u16 tcp_flags, tcp_flags_mask;
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		tcp_flags = be16_to_cpu(match.key->flags);
		tcp_flags_mask = be16_to_cpu(match.mask->flags);

		if (tcp_flags & TCPHDR_FIN)
			ext->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags_mask & TCPHDR_FIN)
			msk->flags |= NFP_FL_TCP_FLAG_FIN;

		if (tcp_flags & TCPHDR_SYN)
			ext->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags_mask & TCPHDR_SYN)
			msk->flags |= NFP_FL_TCP_FLAG_SYN;

		if (tcp_flags & TCPHDR_RST)
			ext->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags_mask & TCPHDR_RST)
			msk->flags |= NFP_FL_TCP_FLAG_RST;

		if (tcp_flags & TCPHDR_PSH)
			ext->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags_mask & TCPHDR_PSH)
			msk->flags |= NFP_FL_TCP_FLAG_PSH;

		if (tcp_flags & TCPHDR_URG)
			ext->flags |= NFP_FL_TCP_FLAG_URG;
		if (tcp_flags_mask & TCPHDR_URG)
			msk->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
			ext->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
			msk->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
			ext->flags |= NFP_FL_IP_FRAG_FIRST;
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			msk->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}

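/* Every key-side assignment above is mirrored on the mask side; the
 * firmware only considers a key bit where the matching mask bit is set.
 * Matching on SYN alone, for example, sets NFP_FL_TCP_FLAG_SYN in both
 * ext->flags and msk->flags, leaving the remaining TCP flags wildcarded.
 */
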
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
			struct nfp_flower_ipv4 *msk,
			struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_match_ipv4_addrs match;

	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(rule, &match);
		ext->ipv4_src = match.key->src;
		ext->ipv4_dst = match.key->dst;
		msk->ipv4_src = match.mask->src;
		msk->ipv4_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
			struct nfp_flower_ipv6 *msk,
			struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		ext->ipv6_src = match.key->src;
		ext->ipv6_dst = match.key->dst;
		msk->ipv6_src = match.mask->src;
		msk->ipv6_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

static int
nfp_flower_compile_geneve_opt(void *ext, void *msk,
			      struct flow_cls_offload *flow)
{
	struct flow_match_enc_opts match;

	flow_rule_match_enc_opts(flow->rule, &match);
	memcpy(ext, match.key->data, match.key->len);
	memcpy(msk, match.mask->data, match.mask->len);

	return 0;
}

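/* No bounds checks are needed on the memcpy()s above: by the time this
 * runs, the key-layer calculation is expected to have rejected option
 * lengths the key blob cannot hold, so ext and msk point at regions
 * sized for the matched Geneve options.
 */
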
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
				  struct nfp_flower_tun_ipv4 *msk,
				  struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		ext->src = match.key->src;
		ext->dst = match.key->dst;
		msk->src = match.mask->src;
		msk->dst = match.mask->dst;
	}
}

static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
			      struct nfp_flower_tun_ip_ext *msk,
			      struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}
}

static void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
				struct nfp_flower_ipv4_gre_tun *msk,
				struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));

	/* NVGRE is the only supported GRE tunnel type */
	ext->ethertype = cpu_to_be16(ETH_P_TEB);
	msk->ethertype = cpu_to_be16(~0);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		ext->tun_key = match.key->keyid;
		msk->tun_key = match.mask->keyid;

		ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
		msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
	}

	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

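/* Pinning the ethertype to ETH_P_TEB with an all-ones mask restricts GRE
 * offload to transparent-Ethernet (NVGRE-style) tunnels.  When the rule
 * matches a GRE key, the key is matched exactly and NFP_FL_GRE_FLAG_KEY
 * is raised in both halves so the firmware insists the KEY bit is set.
 */
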
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
				struct nfp_flower_ipv4_udp_tun *msk,
				struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;
		u32 temp_vni;

		flow_rule_match_enc_keyid(rule, &match);
		temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
		ext->tun_id = cpu_to_be32(temp_vni);
		temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
		msk->tun_id = cpu_to_be32(temp_vni);
	}

	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

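/* A worked example of the VNI placement above, assuming
 * NFP_FL_TUN_VNI_OFFSET is 8: a VNI of 0x123456 is shifted up to
 * 0x12345600, so after cpu_to_be32() the 24-bit VNI sits in the first
 * three bytes of tun_id on the wire, with the final byte left clear.
 */
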
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct flow_cls_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type,
				  struct netlink_ext_ack *extack)
{
	u32 port_id;
	int err;
	u8 *ext;
	u8 *msk;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    flow, key_ls->key_layer);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      port_id, false, tun_type, extack);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, extack);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
				       (struct nfp_flower_mac_mpls *)msk,
				       flow);
		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 flow);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		__be32 tun_dst;

		nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
		tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
		ext += sizeof(struct nfp_flower_ipv4_gre_tun);
		msk += sizeof(struct nfp_flower_ipv4_gre_tun);

		/* Store the tunnel destination in the rule data.
		 * This must be present and be an exact match.
		 */
		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
		nfp_tunnel_add_ipv4_off(app, tun_dst);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		__be32 tun_dst;

		nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
		msk += sizeof(struct nfp_flower_ipv4_udp_tun);

		/* Store the tunnel destination in the rule data.
		 * This must be present and be an exact match.
		 */
		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
		nfp_tunnel_add_ipv4_off(app, tun_dst);

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, msk, flow);
			if (err)
				return err;
		}
	}

	return 0;
}