Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_nve_vxlan.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/netdevice.h>
5 #include <linux/netlink.h>
6 #include <linux/random.h>
7 #include <net/vxlan.h>
8
9 #include "reg.h"
10 #include "spectrum.h"
11 #include "spectrum_nve.h"
12
13 /* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B)
14  *
15  * In the worst case - where we have a VLAN tag on the outer Ethernet
16  * header and IPv6 in overlay and underlay - we need to parse 128 bytes
17  */
18 #define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128
19 #define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96
20
21 #define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS      (VXLAN_F_UDP_ZERO_CSUM_TX | \
22                                                  VXLAN_F_LEARN)
23
/* mlxsw_sp_nve_vxlan_can_offload() - validate a VxLAN device's configuration
 * against what the Spectrum ASIC can offload.
 * @nve: NVE instance (not consulted by the checks below).
 * @dev: candidate VxLAN net_device.
 * @extack: extended ack; receives the reason for the first failed check.
 *
 * Returns true when all checked attributes are supported. On the first
 * unsupported attribute an error message is set on @extack and false is
 * returned, so the order of checks determines which reason is reported.
 */
static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
					   const struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_config *cfg = &vxlan->cfg;

	/* Only an IPv4 underlay is offloadable. */
	if (cfg->saddr.sa.sa_family != AF_INET) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported");
		return false;
	}

	if (vxlan_addr_multicast(&cfg->remote_ip)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
		return false;
	}

	/* The underlay source IP is programmed into the device; a wildcard
	 * address cannot be offloaded.
	 */
	if (vxlan_addr_any(&cfg->saddr)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified");
		return false;
	}

	if (cfg->remote_ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported");
		return false;
	}

	/* Non-zero min/max means a custom UDP source port range was set. */
	if (cfg->port_min || cfg->port_max) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported");
		return false;
	}

	/* Per the error message, TOS value 1 denotes "inherit" in the vxlan
	 * driver; any other value is rejected.
	 */
	if (cfg->tos != 1) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit");
		return false;
	}

	if (cfg->flags & VXLAN_F_TTL_INHERIT) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit");
		return false;
	}

	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
		return false;
	}

	/* Reject any flag outside the explicitly supported set. */
	if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
		return false;
	}

	if (cfg->ttl == 0) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0");
		return false;
	}

	if (cfg->label != 0) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0");
		return false;
	}

	return true;
}
88
89 static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
90                                       const struct net_device *dev,
91                                       struct mlxsw_sp_nve_config *config)
92 {
93         struct vxlan_dev *vxlan = netdev_priv(dev);
94         struct vxlan_config *cfg = &vxlan->cfg;
95
96         config->type = MLXSW_SP_NVE_TYPE_VXLAN;
97         config->ttl = cfg->ttl;
98         config->flowlabel = cfg->label;
99         config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
100         config->ul_tb_id = RT_TABLE_MAIN;
101         config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
102         config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
103         config->udp_dport = cfg->dst_port;
104 }
105
/* Program the device's packet parsing depth and the VxLAN UDP destination
 * port via the MPRS register.
 */
static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
				      unsigned int parsing_depth,
				      __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	/* MPRS expects the UDP port in host byte order. */
	mlxsw_reg_mprs_pack(mprs_pl, parsing_depth, be16_to_cpu(udp_dport));
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
}
115
116 static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
117                                     __be16 udp_dport)
118 {
119         int parsing_depth = mlxsw_sp->nve->inc_parsing_depth_refs ?
120                                 MLXSW_SP_NVE_VXLAN_PARSING_DEPTH :
121                                 MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH;
122
123         return __mlxsw_sp_nve_parsing_set(mlxsw_sp, parsing_depth, udp_dport);
124 }
125
126 static int
127 __mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp,
128                                      __be16 udp_dport)
129 {
130         int err;
131
132         mlxsw_sp->nve->inc_parsing_depth_refs++;
133
134         err = mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport);
135         if (err)
136                 goto err_nve_parsing_set;
137         return 0;
138
139 err_nve_parsing_set:
140         mlxsw_sp->nve->inc_parsing_depth_refs--;
141         return err;
142 }
143
/* Drop a reference on the increased parsing depth and re-program the
 * device. The write is best-effort; its return value is ignored since
 * there is no sensible recovery on this path.
 */
static void
__mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp,
				     __be16 udp_dport)
{
	mlxsw_sp->nve->inc_parsing_depth_refs--;
	mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport);
}
151
152 int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp)
153 {
154         __be16 udp_dport = mlxsw_sp->nve->config.udp_dport;
155
156         return __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, udp_dport);
157 }
158
159 void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp)
160 {
161         __be16 udp_dport = mlxsw_sp->nve->config.udp_dport;
162
163         __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, udp_dport);
164 }
165
166 static void
167 mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
168                                   const struct mlxsw_sp_nve_config *config)
169 {
170         u8 udp_sport;
171
172         mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
173                              config->ttl);
174         /* VxLAN driver's default UDP source port range is 32768 (0x8000)
175          * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
176          * to a random number between 0x80 and 0xee
177          */
178         get_random_bytes(&udp_sport, sizeof(udp_sport));
179         udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
180         mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
181         mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
182 }
183
184 static int
185 mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
186                                const struct mlxsw_sp_nve_config *config)
187 {
188         char tngcr_pl[MLXSW_REG_TNGCR_LEN];
189         u16 ul_vr_id;
190         int err;
191
192         err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
193                                           &ul_vr_id);
194         if (err)
195                 return err;
196
197         mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
198         mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
199         mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
200
201         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
202 }
203
/* Spectrum-1: disable the NVE tunnel by writing TNGCR with valid=false.
 * Best-effort; the write's return value is ignored on this teardown path.
 */
static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];

	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);

	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
}
212
/* Spectrum-1: program the tunnel decap properties (RTDP) for the NVE
 * tunnel identified by @tunnel_index.
 */
static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
					unsigned int tunnel_index)
{
	char rtdp_pl[MLXSW_REG_RTDP_LEN];

	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}
222
/* Spectrum-1: bring up VxLAN offload according to @config - increase the
 * parsing depth and set the UDP destination port, program TNGCR, program
 * RTDP and finally promote the underlay source IP to a decap route. On
 * failure, the steps already performed are unwound in reverse order.
 */
static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport);
	if (err)
		return err;

	err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index);
	if (err)
		goto err_rtdp_set;

	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	/* Drop the parsing depth reference and reset the UDP port to 0. */
	__mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
	return err;
}
257
/* Spectrum-1: tear down VxLAN offload - demote the decap route, clear the
 * tunnel configuration and drop the parsing depth reference (resetting the
 * UDP port to 0). Mirrors mlxsw_sp1_nve_vxlan_init() in reverse.
 */
static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
{
	struct mlxsw_sp_nve_config *config = &nve->config;
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;

	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
					 config->ul_proto, &config->ul_sip);
	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
	__mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
}
268
/* Replay the FDB entries of the VxLAN device for @vni towards the mlxsw
 * switchdev notifier so that existing entries are offloaded. Warns and
 * bails out if @nve_dev is unexpectedly not a VxLAN device.
 */
static int
mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni,
			      struct netlink_ext_ack *extack)
{
	if (WARN_ON(!netif_is_vxlan(nve_dev)))
		return -EINVAL;
	return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier,
				extack);
}
278
/* Clear the offload indication from the FDB entries of the VxLAN device
 * for @vni. Warns and bails out if @nve_dev is not a VxLAN device.
 */
static void
mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
{
	if (WARN_ON(!netif_is_vxlan(nve_dev)))
		return;
	vxlan_fdb_clear_offload(nve_dev, vni);
}
286
/* VxLAN NVE operations for Spectrum-1 devices, invoked by the common NVE
 * core code.
 */
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp1_nve_vxlan_init,
	.fini		= mlxsw_sp1_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
296
297 static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
298                                              bool learning_en)
299 {
300         char tnpc_pl[MLXSW_REG_TNPC_LEN];
301
302         mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
303                             learning_en);
304         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
305 }
306
/* Spectrum-2: program the tunnel configuration - take a reference on the
 * underlay RIF, set the NVE tunnel port learning state (TNPC) and write
 * the tunnel general configuration (TNGCR). The steps already performed
 * are unwound in reverse order on error.
 */
static int
mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
			       const struct mlxsw_sp_nve_config *config)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
	u16 ul_rif_index;
	int err;

	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
					 &ul_rif_index);
	if (err)
		return err;
	/* Cache the RIF index so config_clear() and rtdp_set() can use it. */
	mlxsw_sp->nve->ul_rif_index = ul_rif_index;

	err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
	if (err)
		goto err_vxlan_learning_set;

	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
	mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	if (err)
		goto err_tngcr_write;

	return 0;

err_tngcr_write:
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
err_vxlan_learning_set:
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
	return err;
}
340
/* Spectrum-2: undo mlxsw_sp2_nve_vxlan_config_set() - invalidate TNGCR,
 * disable learning on the tunnel port and release the underlay RIF
 * reference. Register writes are best-effort on this teardown path.
 */
static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];

	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
}
350
/* Spectrum-2: program the tunnel decap properties (RTDP), including the
 * egress underlay RIF that Spectrum-1 does not require.
 */
static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
					unsigned int tunnel_index,
					u16 ul_rif_index)
{
	char rtdp_pl[MLXSW_REG_RTDP_LEN];

	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
	mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}
362
/* Spectrum-2: bring up VxLAN offload according to @config - increase the
 * parsing depth and set the UDP destination port, program the tunnel
 * configuration, program RTDP (with the underlay RIF cached by
 * config_set()) and finally promote the underlay source IP to a decap
 * route. On failure, the steps already performed are unwound in reverse.
 */
static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport);
	if (err)
		return err;

	err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
					   nve->ul_rif_index);
	if (err)
		goto err_rtdp_set;

	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	/* Drop the parsing depth reference and reset the UDP port to 0. */
	__mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
	return err;
}
398
/* Spectrum-2: tear down VxLAN offload - demote the decap route, clear the
 * tunnel configuration and drop the parsing depth reference (resetting the
 * UDP port to 0). Mirrors mlxsw_sp2_nve_vxlan_init() in reverse.
 */
static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
{
	struct mlxsw_sp_nve_config *config = &nve->config;
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;

	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
					 config->ul_proto, &config->ul_sip);
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
	__mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
}
409
/* VxLAN NVE operations for Spectrum-2 devices, invoked by the common NVE
 * core code. Shares the validation and config translation callbacks with
 * Spectrum-1; only init/fini differ.
 */
const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp2_nve_vxlan_init,
	.fini		= mlxsw_sp2_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};