/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1 << PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
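/* Illustrative usage (not part of this file): "modprobe vxlan udp_port=4789"
 * selects the IANA-assigned port instead of the legacy default. udp_port is
 * read-only (0444) once loaded, while log_ecn_error (0644) can be toggled at
 * runtime through /sys/module/vxlan/parameters/log_ecn_error.
 */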
static int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

static int vxlan_sock_add(struct vxlan_dev *vxlan);

static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};
/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}
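/* Metadata collection is on when either this socket was created with
 * VXLAN_F_COLLECT_METADATA or ip_tunnel_collect_metadata() reports that
 * collection has been requested globally (e.g. by a consumer of tunnel
 * metadata such as an attached eBPF program).
 */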
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
	return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
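/* Two accessors for the same first element: first_remote_rcu() is for
 * RCU read-side callers (e.g. the snoop path) and uses list_entry_rcu(),
 * while first_remote_rtnl() is for callers that already hold RTNL and
 * can therefore use the plain list primitive.
 */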
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
	struct vxlan_dev_node *node;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
		if (node->vxlan->default_dst.remote_vni == vni)
			return node->vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
					sa_family_t family, __be16 port,
					u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, vni);
}
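/* Device lookup is two-staged: the shared vxlan_sock is found first by
 * (netns, family, port, receive flags), then the VNI hash on that socket
 * selects the vxlan_dev. Flow-based (COLLECT_METADATA) sockets collapse
 * every VNI onto slot 0, since a single device handles them all.
 */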
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = cpu_to_be32(VXLAN_N_VID),
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __be32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __be32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;

	dst_cache_reset(&rd->dst_cache);
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __be32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;

	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
		kfree(rd);
		return -ENOBUFS;
	}

	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  __be32 vni_field,
					  struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = vxlan_rco_start(vni_field);
	offset = start + vxlan_rco_offset(vni_field);

	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
				     start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}
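/* Remote checksum offload (RCO) encodes the checksum start and offset in
 * the low bits of the otherwise-reserved VNI field: vxlan_rco_start()
 * recovers where checksumming begins and vxlan_rco_offset() where the
 * 16-bit checksum field sits relative to that start.
 */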
static struct sk_buff **vxlan_gro_receive(struct sock *sk,
					  struct sk_buff **head,
					  struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
	__be32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = vh->vx_flags;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       vh->vx_vni, &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));

		if (!vh)
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = call_gro_receive(eth_gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	/* Sets 'skb->inner_mac_header' since we are always called with
	 * 'skb->encapsulation' set.
	 */
	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __be32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;
	int rc;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				notify |= vxlan_fdb_replace(f, ip, port, vni,
							    ifindex);
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->cfg.addrmax &&
		    vxlan->addrcnt >= vxlan->cfg.addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
		if (rc < 0) {
			kfree(f);
			return rc;
		}

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list) {
		dst_cache_destroy(&rd->dst_cache);
		kfree(rd);
	}
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}

static void vxlan_dst_free(struct rcu_head *head)
{
	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

	dst_cache_destroy(&rd->dst_cache);
	kfree(rd);
}

static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
				  struct vxlan_rdst *rd)
{
	list_del_rcu(&rd->list);
	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
	call_rcu(&rd->rcu, vxlan_dst_free);
}
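/* Both vxlan_fdb_destroy() and vxlan_fdb_dst_destroy() unlink under the
 * hash lock but defer the actual kfree() through call_rcu(), so readers
 * walking the table or the remotes list under RCU never see freed memory.
 */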
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, __be32 *vni,
			   u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->cfg.dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	__be32 vni;
	u32 ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	__be32 vni;
	u32 ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		vxlan_fdb_dst_destroy(vxlan, f, rd);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int *idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int err = 0;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				if (*idx < cb->args[2])
					goto skip;

				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
skip:
				*idx += 1;
			}
		}
	}
out:
	return err;
}
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->cfg.dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
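/* Learning example: if a frame from MAC A arrives encapsulated from VTEP
 * 10.0.0.2 while the table maps A to 10.0.0.1, the entry silently migrates
 * (with an RTM_NEWNEIGH notification) unless it is static (NUD_NOARP), in
 * which case the packet is reported bogus and dropped.
 */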
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;
	struct vxlan_sock *sock4;
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6;
#endif
	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;

	sock4 = rtnl_dereference(dev->vn4_sock);

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
		return false;
#if IS_ENABLED(CONFIG_IPV6)
	sock6 = rtnl_dereference(dev->vn6_sock);
	if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
		return false;
#endif

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (family == AF_INET &&
		    rtnl_dereference(vxlan->vn4_sock) != sock4)
			continue;
#if IS_ENABLED(CONFIG_IPV6)
		if (family == AF_INET6 &&
		    rtnl_dereference(vxlan->vn6_sock) != sock6)
			continue;
#endif

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}
static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
	struct vxlan_net *vn;

	if (!vs)
		return false;
	if (!atomic_dec_and_test(&vs->refcnt))
		return false;

	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	udp_tunnel_notify_del_rx_port(vs->sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	return true;
}

static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

	rcu_assign_pointer(vxlan->vn6_sock, NULL);
#endif

	rcu_assign_pointer(vxlan->vn4_sock, NULL);
	synchronize_net();

	vxlan_vs_del_dev(vxlan);

	if (__vxlan_sock_release_prep(sock4)) {
		udp_tunnel_sock_release(sock4->sock);
		kfree(sock4);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (__vxlan_sock_release_prep(sock6)) {
		udp_tunnel_sock_release(sock6->sock);
		kfree(sock6);
	}
#endif
}
/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_join_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_leave_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
			  struct sk_buff *skb, u32 vxflags)
{
	size_t start, offset;

	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
		goto out;

	start = vxlan_rco_start(unparsed->vx_vni);
	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	if (!pskb_may_pull(skb, offset + sizeof(u16)))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
	unparsed->vx_flags &= ~VXLAN_HF_RCO;
	unparsed->vx_vni &= VXLAN_VNI_MASK;

	return true;
}
static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
				struct sk_buff *skb, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
	struct metadata_dst *tun_dst;

	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
		goto out;

	md->gbp = ntohs(gbp->policy_id);

	tun_dst = (struct metadata_dst *)skb_dst(skb);
	if (tun_dst) {
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
		tun_dst->u.tun_info.options_len = sizeof(*md);
	}
	if (gbp->dont_learn)
		md->gbp |= VXLAN_GBP_DONT_LEARN;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;

	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vxflags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;
out:
	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
				__be16 *protocol,
				struct sk_buff *skb, u32 vxflags)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;

	/* Need to have Next Protocol set for interfaces in GPE mode. */
	if (!gpe->np_applied)
		return false;
	/* "The initial version is 0. If a receiver does not support the
	 * version indicated it MUST drop the packet."
	 */
	if (gpe->version != 0)
		return false;
	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
	 * processing MUST occur." However, we don't implement OAM
	 * processing, thus drop the packet.
	 */
	if (gpe->oam_flag)
		return false;

	switch (gpe->next_protocol) {
	case VXLAN_GPE_NP_IPV4:
		*protocol = htons(ETH_P_IP);
		break;
	case VXLAN_GPE_NP_IPV6:
		*protocol = htons(ETH_P_IPV6);
		break;
	case VXLAN_GPE_NP_ETHERNET:
		*protocol = htons(ETH_P_TEB);
		break;
	default:
		return false;
	}

	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
	return true;
}
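/* With GPE the inner payload need not be Ethernet: the Next Protocol field
 * selects IPv4, IPv6 or Ethernet, and the receive path treats anything
 * non-Ethernet as a raw protocol (no inner MAC handling, see vxlan_rcv()).
 */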
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
			  struct vxlan_sock *vs,
			  struct sk_buff *skb)
{
	union vxlan_addr saddr;

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		return false;

	/* Get address from the outer IP header */
	if (vxlan_get_sk_family(vs) == AF_INET) {
		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		return false;

	return true;
}
static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
				  struct sk_buff *skb)
{
	int err = 0;

	if (vxlan_get_sk_family(vs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err) && log_ecn_error) {
		if (vxlan_get_sk_family(vs) == AF_INET)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &((struct iphdr *)oiph)->saddr,
					     ((struct iphdr *)oiph)->tos);
		else
			net_info_ratelimited("non-ECT from %pI6\n",
					     &((struct ipv6hdr *)oiph)->saddr);
	}

	return err <= 1;
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 protocol = htons(ETH_P_TEB);
	bool raw_proto = false;
	void *oiph;

	/* Need UDP and VXLAN header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto drop;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		goto drop;
	}
	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
	if (!vxlan)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_GPE) {
		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
			goto drop;
		raw_proto = true;
	}

	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
		goto drop;

	if (vxlan_collect_metadata(vs)) {
		__be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
		struct metadata_dst *tun_dst;

		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
					 key32_to_tunnel_id(vni), sizeof(*md));

		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together. This is
	 * ensured in vxlan_dev_configure.
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * the VXLAN RFC (RFC7348), which stipulates that bits in
		 * reserved fields are to be ignored. The approach here
		 * maintains compatibility with previous stack code, and also
		 * is more robust and provides a little more security in
		 * adding extensions to VXLAN.
		 */
		goto drop;
	}

	if (!raw_proto) {
		if (!vxlan_set_mac(vxlan, vs, skb))
			goto drop;
	} else {
		skb_reset_mac_header(skb);
		skb->dev = vxlan->dev;
		skb->pkt_type = PACKET_HOST;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&vxlan->gro_cells, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}
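/* Receive-side extension order matters: GPE is parsed before the header is
 * pulled (it may change the inner protocol), then remote checksum offload,
 * then GBP. Any flag or VNI bits still set afterwards mark the packet as
 * malformed and it is dropped.
 */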
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff	*reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
				       struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL)
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_reset_mac_header(reply);

	ns = (struct nd_msg *)skb_transport_header(request);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
	for (i = 0; i < ns_olen - 1; i += (ns->opt[i + 1] << 3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_reset_network_header(reply);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_reset_transport_header(reply);

	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);

	/* Neighbor Advertisement */
	memset(na, 0, sizeof(*na) + na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na) + na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na) + na_olen, 0));

	pip6->payload_len = htons(sizeof(*na) + na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *saddr, *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	iphdr = ipv6_hdr(skb);
	saddr = &iphdr->saddr;
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)skb_transport_header(skb);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= VXLAN_HF_GBP;

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
			       __be16 protocol)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;

	gpe->np_applied = 1;

	switch (protocol) {
	case htons(ETH_P_IP):
		gpe->next_protocol = VXLAN_GPE_NP_IPV4;
		return 0;
	case htons(ETH_P_IPV6):
		gpe->next_protocol = VXLAN_GPE_NP_IPV6;
		return 0;
	case htons(ETH_P_TEB):
		gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
		return 0;
	}
	return -EPFNOSUPPORT;
}
static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
			   int iphdr_len, __be32 vni,
			   struct vxlan_metadata *md, u32 vxflags,
			   bool udp_sum)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 inner_protocol = htons(ETH_P_TEB);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check)))
			type |= SKB_GSO_TUNNEL_REMCSUM;
	}

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + iphdr_len
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto out_free;

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb))
		return -ENOMEM;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		goto out_free;

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vni);

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		unsigned int start;

		start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
		vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
		vxh->vx_flags |= VXLAN_HF_RCO;

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);
	if (vxflags & VXLAN_F_GPE) {
		err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
		if (err < 0)
			goto out_free;
		inner_protocol = skb->protocol;
	}

	skb_set_inner_protocol(skb, inner_protocol);
	return 0;

out_free:
	kfree_skb(skb);
	return err;
}
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
				      struct sk_buff *skb, int oif, u8 tos,
				      __be32 daddr, __be32 *saddr,
				      struct dst_cache *dst_cache,
				      const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = oif;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = *saddr;

	rt = ip_route_output_key(vxlan->net, &fl4);
	if (!IS_ERR(rt)) {
		*saddr = fl4.saddr;
		if (use_cache)
			dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
	}
	return rt;
}
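/* The per-destination dst_cache is only consulted when the route cannot
 * vary per packet: a configured TOS without tunnel metadata disables it,
 * since the DSCP/ECN bits then come from each inner header.
 */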
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
					  struct sk_buff *skb, int oif, u8 tos,
					  __be32 label,
					  const struct in6_addr *daddr,
					  struct in6_addr *saddr,
					  struct dst_cache *dst_cache,
					  const struct ip_tunnel_info *info)
{
	struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct dst_entry *ndst;
	struct flowi6 fl6;
	int err;

	if (!sock6)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		ndst = dst_cache_get_ip6(dst_cache, saddr);
		if (ndst)
			return ndst;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.daddr = *daddr;
	fl6.saddr = *saddr;
	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = IPPROTO_UDP;

	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
					 sock6->sock->sk,
					 &ndst, &fl6);
	if (err < 0)
		return ERR_PTR(err);

	*saddr = fl6.saddr;
	if (use_cache)
		dst_cache_set_ip6(dst_cache, ndst, saddr);
	return ndst;
}
#endif
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
	struct net_device *dev = skb->dev;
	int len = skb->len;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family = AF_INET6;
#endif
	}

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		dev->stats.rx_dropped++;
	}
}
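/* Bypass keeps the usual accounting intact: the frame is charged as TX on
 * the source vxlan device and as RX on the destination one, exactly as if
 * it had crossed the wire.
 */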
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct dst_cache *dst_cache;
	struct ip_tunnel_info *info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct sock *sk;
	struct rtable *rt = NULL;
	const struct iphdr *old_iph;
	union vxlan_addr *dst;
	union vxlan_addr remote_ip, local_ip;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 src_port = 0, dst_port;
	__be32 vni, label;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;
	u32 flags = vxlan->flags;
	bool udp_sum = false;
	bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));

	info = skb_tunnel_info(skb);

	if (rdst) {
		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
		vni = rdst->remote_vni;
		dst = &rdst->remote_ip;
		local_ip = vxlan->cfg.saddr;
		dst_cache = &rdst->dst_cache;
	} else {
		if (!info) {
			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
				  dev->name);
			goto drop;
		}
		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
		vni = tunnel_id_to_key32(info->key.tun_id);
		remote_ip.sa.sa_family = ip_tunnel_info_af(info);
		if (remote_ip.sa.sa_family == AF_INET) {
			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
			local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
		} else {
			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
			local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
		}
		dst = &remote_ip;
		dst_cache = &info->dst_cache;
	}

	if (vxlan_addr_any(dst)) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	old_iph = ip_hdr(skb);

	ttl = vxlan->cfg.ttl;
	if (!ttl && vxlan_addr_multicast(dst))
		ttl = 1;

	tos = vxlan->cfg.tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	label = vxlan->cfg.label;
	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				     vxlan->cfg.port_max, true);

	if (info) {
		ttl = info->key.ttl;
		tos = info->key.tos;
		label = info->key.label;
		udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);

		if (info->options_len)
			md = ip_tunnel_info_opts(info);
	} else {
		md->gbp = skb->mark;
	}

	if (dst->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);

		if (!sock4)
			goto drop;
		sk = sock4->sock->sk;

		rt = vxlan_get_route(vxlan, skb,
				     rdst ? rdst->remote_ifindex : 0, tos,
				     dst->sin.sin_addr.s_addr,
				     &local_ip.sin.sin_addr.s_addr,
				     dst_cache, info);
		if (IS_ERR(rt)) {
			netdev_dbg(dev, "no route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.collisions++;
			goto rt_tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (!info && rt->rt_flags & RTCF_LOCAL &&
		    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			ip_rt_put(rt);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		if (!info)
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
		else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
			df = htons(IP_DF);

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
				      vni, md, flags, udp_sum);
		if (err < 0)
			goto xmit_tx_error;

		udp_tunnel_xmit_skb(rt, sk, skb, local_ip.sin.sin_addr.s_addr,
				    dst->sin.sin_addr.s_addr, tos, ttl, df,
				    src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
		struct dst_entry *ndst;
		u32 rt6i_flags;

		if (!sock6)
			goto drop;
		sk = sock6->sock->sk;

		ndst = vxlan6_get_route(vxlan, skb,
					rdst ? rdst->remote_ifindex : 0, tos,
					label, &dst->sin6.sin6_addr,
					&local_ip.sin6.sin6_addr,
					dst_cache, info);
		if (IS_ERR(ndst)) {
			netdev_dbg(dev, "no route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (ndst->dev == dev) {
			netdev_dbg(dev, "circular route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dst_release(ndst);
			dev->stats.collisions++;
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
		if (!info && rt6i_flags & RTF_LOCAL &&
		    !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			dst_release(ndst);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		if (!info)
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip6_dst_hoplimit(ndst);
		skb_scrub_packet(skb, xnet);
		err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
				      vni, md, flags, udp_sum);
		if (err < 0) {
			dst_release(ndst);
			dev->stats.tx_errors++;
			return;
		}
		udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
				     &local_ip.sin6.sin6_addr,
				     &dst->sin6.sin6_addr, tos, ttl,
				     label, src_port, dst_port, !udp_sum);
#endif
	}

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

xmit_tx_error:
	/* skb is already freed. */
	skb = NULL;
rt_tx_error:
	ip_rt_put(rt);
tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}
/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Source port is based on a hash of the flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct ip_tunnel_info *info;
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;

	info = skb_tunnel_info(skb);

	skb_reset_mac_header(skb);

	if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
		if (info && info->mode & IP_TUNNEL_INFO_TX)
			vxlan_xmit_one(skb, dev, NULL, false);
		else
			kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vxlan->flags & VXLAN_F_PROXY) {
		eth = eth_hdr(skb);
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 pskb_may_pull(skb, sizeof(struct ipv6hdr)
				       + sizeof(struct nd_msg)) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg;

			msg = (struct nd_msg *)skb_transport_header(skb);
			if (msg->icmph.icmp6_code == 0 &&
			    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb);
		}
#endif
	}

	eth = eth_hdr(skb);
	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}
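/* Fan-out note: for an FDB entry with several remotes the skb is cloned
 * once per additional destination while the original goes to the first
 * remote (fdst), so the common single-remote case involves no clone.
 */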
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & (NUD_PERMANENT | NUD_NOARP))
				continue;

			timeout = f->used + vxlan->cfg.age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	hlist_del_init_rcu(&vxlan->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
	hlist_del_init_rcu(&vxlan->hlist6.hlist);
#endif
	spin_unlock(&vn->sock_lock);
}

static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
			     struct vxlan_dev_node *node)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__be32 vni = vxlan->default_dst.remote_vni;

	node->vxlan = vxlan;
	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan);

	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int ret;

	ret = vxlan_sock_add(vxlan);
	if (ret < 0)
		return ret;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret == -EADDRINUSE)
			ret = 0;
		if (ret) {
			vxlan_sock_release(vxlan);
			return ret;
		}
	}

	if (vxlan->cfg.age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan))
		ret = vxlan_igmp_leave(vxlan);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);
	vxlan_sock_release(vxlan);

	return ret;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static int __vxlan_change_mtu(struct net_device *dev,
			      struct net_device *lowerdev,
			      struct vxlan_rdst *dst, int new_mtu, bool strict)
{
	int max_mtu = IP_MAX_MTU;

	if (lowerdev)
		max_mtu = lowerdev->mtu;

	if (dst->remote_ip.sa.sa_family == AF_INET6)
		max_mtu -= VXLAN6_HEADROOM;
	else
		max_mtu -= VXLAN_HEADROOM;

	if (new_mtu < 68)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
							 dst->remote_ifindex);

	return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
}
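/* VXLAN_HEADROOM / VXLAN6_HEADROOM cover the full encapsulation overhead
 * (outer IPv4/IPv6 + UDP + VXLAN headers plus the inner Ethernet header),
 * which is why the usable MTU is the lower device's MTU minus that slack.
 */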
static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	__be16 sport, dport;

	sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				  vxlan->cfg.port_max, true);
	dport = info->key.tp_dst ? : vxlan->cfg.dst_port;

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
		struct rtable *rt;

		if (!sock4)
			return -EINVAL;
		rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
				     info->key.u.ipv4.dst,
				     &info->key.u.ipv4.src,
				     &info->dst_cache, info);
		if (IS_ERR(rt))
			return PTR_ERR(rt);
		ip_rt_put(rt);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct dst_entry *ndst;

		ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
					info->key.label, &info->key.u.ipv6.dst,
					&info->key.u.ipv6.src,
					&info->dst_cache, info);
		if (IS_ERR(ndst))
			return PTR_ERR(ndst);
		dst_release(ndst);
#else /* !CONFIG_IPV6 */
		return -EPFNOSUPPORT;
#endif
	}
	info->key.tp_src = sport;
	info->key.tp_dst = dport;
	return 0;
}
static const struct net_device_ops vxlan_netdev_ether_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};

static const struct net_device_ops vxlan_netdev_raw_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
/* Calls the ndo_udp_tunnel_add of the caller in order to
 * supply the listening VXLAN UDP ports. Callers are expected
 * to implement the ndo_udp_tunnel_add.
 */
static void vxlan_push_rx_ports(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist)
			udp_tunnel_push_rx_port(dev, vs->sock,
						(vs->flags & VXLAN_F_GPE) ?
						UDP_TUNNEL_TYPE_VXLAN_GPE :
						UDP_TUNNEL_TYPE_VXLAN);
	}
	spin_unlock(&vn->sock_lock);
}
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->cfg.dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	gro_cells_init(&vxlan->gro_cells, dev);

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
static void vxlan_ether_setup(struct net_device *dev)
{
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &vxlan_netdev_ether_ops;
}
static void vxlan_raw_setup(struct net_device *dev)
{
	dev->header_ops = NULL;
	dev->type = ARPHRD_NONE;
	dev->hard_header_len = 0;

	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->netdev_ops = &vxlan_netdev_raw_ops;
}
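/* The attribute policy below corresponds to the iproute2 "type vxlan"
 * options; for example (assuming a reasonably recent iproute2):
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *           dev eth0 dstport 4789
 *   ip link add vxlan1 type vxlan external gpe   # metadata + GPE mode
 */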
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID] = { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK] = { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS] = { .type = NLA_U8 },
	[IFLA_VXLAN_TTL] = { .type = NLA_U8 },
	[IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
	[IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
	[IFLA_VXLAN_RSC] = { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
	[IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
	[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
	[IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
	[IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_N_VID)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo = vxlan_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
		udp_conf.ipv6_v6only = 1;
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
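/* Note that ipv6_v6only is set above so an AF_INET6 socket never captures
 * IPv4 datagrams; metadata mode can therefore bind separate IPv4 and IPv6
 * sockets to the same UDP port.
 */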
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
					      __be16 port, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
			PTR_ERR(sock));
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	udp_tunnel_notify_add_rx_port(sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_rcv;
	tunnel_cfg.encap_destroy = NULL;
	tunnel_cfg.gro_receive = vxlan_gro_receive;
	tunnel_cfg.gro_complete = vxlan_gro_complete;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}
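/* Unless the device was created with no_share, an existing socket with a
 * matching address family, port and receive flags is reused (taking a
 * reference) instead of binding a new one.
 */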
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = NULL;
	struct vxlan_dev_node *node;

	if (!vxlan->cfg.no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
				     vxlan->cfg.dst_port, vxlan->flags);
		if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
			spin_unlock(&vn->sock_lock);
			return -EBUSY;
		}
		spin_unlock(&vn->sock_lock);
	}
	if (!vs)
		vs = vxlan_socket_create(vxlan->net, ipv6,
					 vxlan->cfg.dst_port, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
	if (ipv6) {
		rcu_assign_pointer(vxlan->vn6_sock, vs);
		node = &vxlan->hlist6;
	} else
#endif
	{
		rcu_assign_pointer(vxlan->vn4_sock, vs);
		node = &vxlan->hlist4;
	}
	vxlan_vs_add_dev(vs, vxlan, node);
	return 0;
}
static int vxlan_sock_add(struct vxlan_dev *vxlan)
{
	bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
	bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
	int ret = 0;

	RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
	if (ipv6 || metadata)
		ret = __vxlan_sock_add(vxlan, true);
#endif
	if (!ret && (!ipv6 || metadata))
		ret = __vxlan_sock_add(vxlan, false);
	if (ret < 0)
		vxlan_sock_release(vxlan);
	return ret;
}
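/* In COLLECT_METADATA mode both an IPv4 and an IPv6 socket are opened, as
 * the encapsulation family is only known per packet at transmit time.
 */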
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
			       struct vxlan_config *conf)
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
	struct vxlan_rdst *dst = &vxlan->default_dst;
	unsigned short needed_headroom = ETH_HLEN;
	int err;
	bool use_ipv6 = false;
	__be16 default_port = vxlan->cfg.dst_port;
	struct net_device *lowerdev = NULL;

	if (conf->flags & VXLAN_F_GPE) {
		/* For now, allow GPE only together with COLLECT_METADATA.
		 * This can be relaxed later; in such case, the other side
		 * of the PtP link will have to be provided.
		 */
		if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
		    !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
			pr_info("unsupported combination of extensions\n");
			return -EINVAL;
		}

		vxlan_raw_setup(dev);
	} else {
		vxlan_ether_setup(dev);
	}

	vxlan->net = src_net;

	dst->remote_vni = conf->vni;

	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	if (!dst->remote_ip.sa.sa_family)
		dst->remote_ip.sa.sa_family = AF_INET;

	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
	    vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;
		use_ipv6 = true;
		vxlan->flags |= VXLAN_F_IPV6;
	}

	if (conf->label && !use_ipv6) {
		pr_info("label is only supported with IPv6\n");
		return -EINVAL;
	}

	if (conf->remote_ifindex) {
		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
		dst->remote_ifindex = conf->remote_ifindex;

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);
			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
		}
#endif

		if (!conf->mtu)
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		needed_headroom = lowerdev->hard_header_len;
	} else if (vxlan_addr_multicast(&dst->remote_ip)) {
		pr_info("multicast destination requires interface to be specified\n");
		return -EINVAL;
	}

	if (conf->mtu) {
		err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
		if (err)
			return err;
	}

	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
		needed_headroom += VXLAN6_HEADROOM;
	else
		needed_headroom += VXLAN_HEADROOM;
	dev->needed_headroom = needed_headroom;

	memcpy(&vxlan->cfg, conf, sizeof(*conf));
	if (!vxlan->cfg.dst_port) {
		if (conf->flags & VXLAN_F_GPE)
			vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
		else
			vxlan->cfg.dst_port = default_port;
	}
	vxlan->flags |= conf->flags;

	if (!vxlan->cfg.age_interval)
		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;

	list_for_each_entry(tmp, &vn->vxlan_list, next) {
		if (tmp->cfg.vni == conf->vni &&
		    (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
		     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
		    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
		    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
		    (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
			pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
			return -EEXIST;
		}
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->cfg.dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_config conf;

	memset(&conf, 0, sizeof(conf));

	if (data[IFLA_VXLAN_ID])
		conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));

	if (data[IFLA_VXLAN_GROUP]) {
		conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		conf.remote_ip.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		conf.saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		conf.saddr.sa.sa_family = AF_INET6;
	}

	if (data[IFLA_VXLAN_LINK])
		conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);

	if (data[IFLA_VXLAN_TOS])
		conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (data[IFLA_VXLAN_LABEL])
		conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
			     IPV6_FLOWLABEL_MASK;

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		conf.flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		conf.flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		conf.flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		conf.flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		conf.flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_COLLECT_METADATA] &&
	    nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
		conf.flags |= VXLAN_F_COLLECT_METADATA;

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		conf.port_min = ntohs(p->low);
		conf.port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (data[IFLA_VXLAN_UDP_CSUM] &&
	    !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;

	if (data[IFLA_VXLAN_REMCSUM_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
		conf.flags |= VXLAN_F_REMCSUM_TX;

	if (data[IFLA_VXLAN_REMCSUM_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
		conf.flags |= VXLAN_F_REMCSUM_RX;

	if (data[IFLA_VXLAN_GBP])
		conf.flags |= VXLAN_F_GBP;

	if (data[IFLA_VXLAN_GPE])
		conf.flags |= VXLAN_F_GPE;

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;

	if (tb[IFLA_MTU])
		conf.mtu = nla_get_u32(tb[IFLA_MTU]);

	return vxlan_dev_configure(src_net, dev, &conf);
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	gro_cells_destroy(&vxlan->gro_cells);
	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}
static size_t vxlan_get_size(const struct net_device *dev)
{

	return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low = htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GPE &&
	    nla_put_flag(skb, IFLA_VXLAN_GPE))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind = "vxlan",
	.maxtype = IFLA_VXLAN_MAX,
	.policy = vxlan_policy,
	.priv_size = sizeof(struct vxlan_dev),
	.setup = vxlan_setup,
	.validate = vxlan_validate,
	.newlink = vxlan_newlink,
	.dellink = vxlan_dellink,
	.get_size = vxlan_get_size,
	.fill_info = vxlan_fill_info,
	.get_link_net = vxlan_get_link_net,
};
struct net_device *vxlan_dev_create(struct net *net, const char *name,
				    u8 name_assign_type,
				    struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = vxlan_dev_configure(net, dev, conf);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	err = rtnl_configure_link(dev, NULL);
	if (err < 0) {
		LIST_HEAD(list_kill);

		vxlan_dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);
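/* In-tree users such as openvswitch create metadata-mode vxlan devices
 * through vxlan_dev_create() rather than via rtnl_newlink.
 */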
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created a vxlan device with carrier
		 * and we lose the carrier due to module unload
		 * we also need to remove the vxlan device. In other
		 * cases, it's not necessary and remote_ifindex
		 * is 0 here, so no matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}
static int vxlan_netdevice_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);
	else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
		vxlan_push_rx_ports(dev);

	return NOTIFY_DONE;
}
static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_netdevice_event,
};
static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}
static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already been added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net)) {
			gro_cells_destroy(&vxlan->gro_cells);
			unregister_netdevice_queue(vxlan->dev, &list);
		}
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	return rc;
}
late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");