/*
 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "bonding.h"	/* bonding driver internals: BOND_MODE, bond_opt_*, ... */
24 static size_t bond_get_slave_size(const struct net_device *bond_dev,
25 const struct net_device *slave_dev)
27 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_STATE */
28 nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_MII_STATUS */
29 nla_total_size(sizeof(u32)) + /* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
30 nla_total_size(MAX_ADDR_LEN) + /* IFLA_BOND_SLAVE_PERM_HWADDR */
31 nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_QUEUE_ID */
32 nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
36 static int bond_fill_slave_info(struct sk_buff *skb,
37 const struct net_device *bond_dev,
38 const struct net_device *slave_dev)
40 struct slave *slave = bond_slave_get_rtnl(slave_dev);
42 if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
45 if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
48 if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
49 slave->link_failure_count))
52 if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
53 slave_dev->addr_len, slave->perm_hwaddr))
56 if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
59 if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
60 const struct aggregator *agg;
62 agg = SLAVE_AD_INFO(slave)->port.aggregator;
64 if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
65 agg->aggregator_identifier))
75 static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
76 [IFLA_BOND_MODE] = { .type = NLA_U8 },
77 [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
78 [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
79 [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
80 [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
81 [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
82 [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
83 [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
84 [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
85 [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
86 [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
87 [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
88 [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
89 [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
90 [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
91 [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
92 [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
93 [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
94 [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
95 [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
96 [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
97 [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
98 [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
101 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
103 if (tb[IFLA_ADDRESS]) {
104 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
106 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
107 return -EADDRNOTAVAIL;
112 static int bond_changelink(struct net_device *bond_dev,
113 struct nlattr *tb[], struct nlattr *data[])
115 struct bonding *bond = netdev_priv(bond_dev);
116 struct bond_opt_value newval;
123 if (data[IFLA_BOND_MODE]) {
124 int mode = nla_get_u8(data[IFLA_BOND_MODE]);
126 bond_opt_initval(&newval, mode);
127 err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
131 if (data[IFLA_BOND_ACTIVE_SLAVE]) {
132 int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
133 struct net_device *slave_dev;
134 char *active_slave = "";
137 slave_dev = __dev_get_by_index(dev_net(bond_dev),
141 active_slave = slave_dev->name;
143 bond_opt_initstr(&newval, active_slave);
144 err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
148 if (data[IFLA_BOND_MIIMON]) {
149 miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
151 bond_opt_initval(&newval, miimon);
152 err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
156 if (data[IFLA_BOND_UPDELAY]) {
157 int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
159 bond_opt_initval(&newval, updelay);
160 err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
164 if (data[IFLA_BOND_DOWNDELAY]) {
165 int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
167 bond_opt_initval(&newval, downdelay);
168 err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
172 if (data[IFLA_BOND_USE_CARRIER]) {
173 int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
175 bond_opt_initval(&newval, use_carrier);
176 err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
180 if (data[IFLA_BOND_ARP_INTERVAL]) {
181 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
183 if (arp_interval && miimon) {
184 pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
189 bond_opt_initval(&newval, arp_interval);
190 err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
194 if (data[IFLA_BOND_ARP_IP_TARGET]) {
198 bond_option_arp_ip_targets_clear(bond);
199 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
202 if (nla_len(attr) < sizeof(target))
205 target = nla_get_be32(attr);
207 bond_opt_initval(&newval, (__force u64)target);
208 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
214 if (i == 0 && bond->params.arp_interval)
215 pr_warn("%s: Removing last arp target with arp_interval on\n",
220 if (data[IFLA_BOND_ARP_VALIDATE]) {
221 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
223 if (arp_validate && miimon) {
224 pr_err("%s: ARP validating cannot be used with MII monitoring\n",
229 bond_opt_initval(&newval, arp_validate);
230 err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
234 if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
235 int arp_all_targets =
236 nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
238 bond_opt_initval(&newval, arp_all_targets);
239 err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
243 if (data[IFLA_BOND_PRIMARY]) {
244 int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
245 struct net_device *dev;
248 dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
252 bond_opt_initstr(&newval, primary);
253 err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
257 if (data[IFLA_BOND_PRIMARY_RESELECT]) {
258 int primary_reselect =
259 nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
261 bond_opt_initval(&newval, primary_reselect);
262 err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
266 if (data[IFLA_BOND_FAIL_OVER_MAC]) {
268 nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
270 bond_opt_initval(&newval, fail_over_mac);
271 err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
275 if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
276 int xmit_hash_policy =
277 nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
279 bond_opt_initval(&newval, xmit_hash_policy);
280 err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
284 if (data[IFLA_BOND_RESEND_IGMP]) {
286 nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
288 bond_opt_initval(&newval, resend_igmp);
289 err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
293 if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
295 nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
297 bond_opt_initval(&newval, num_peer_notif);
298 err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
302 if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
303 int all_slaves_active =
304 nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
306 bond_opt_initval(&newval, all_slaves_active);
307 err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
311 if (data[IFLA_BOND_MIN_LINKS]) {
313 nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
315 bond_opt_initval(&newval, min_links);
316 err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
320 if (data[IFLA_BOND_LP_INTERVAL]) {
322 nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
324 bond_opt_initval(&newval, lp_interval);
325 err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
329 if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
330 int packets_per_slave =
331 nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
333 bond_opt_initval(&newval, packets_per_slave);
334 err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
338 if (data[IFLA_BOND_AD_LACP_RATE]) {
340 nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
342 bond_opt_initval(&newval, lacp_rate);
343 err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
347 if (data[IFLA_BOND_AD_SELECT]) {
349 nla_get_u8(data[IFLA_BOND_AD_SELECT]);
351 bond_opt_initval(&newval, ad_select);
352 err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
/* Create a new bond device: apply any supplied options, then register
 * the netdevice.  Returns 0 or a negative errno.
 */
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = bond_changelink(bond_dev, tb, data);
	if (err < 0)
		return err;

	return register_netdevice(bond_dev);
}
371 static size_t bond_get_size(const struct net_device *bond_dev)
373 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
374 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
375 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
376 nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
377 nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
378 nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
379 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
380 /* IFLA_BOND_ARP_IP_TARGET */
381 nla_total_size(sizeof(struct nlattr)) +
382 nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
383 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
384 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
385 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
386 nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
387 nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
388 nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
389 nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
390 nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
391 nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
392 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
393 nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
394 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
395 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
396 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
397 nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
398 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
399 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
400 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
401 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
402 nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
406 static int bond_fill_info(struct sk_buff *skb,
407 const struct net_device *bond_dev)
409 struct bonding *bond = netdev_priv(bond_dev);
410 struct net_device *slave_dev = bond_option_active_slave_get(bond);
411 struct nlattr *targets;
412 unsigned int packets_per_slave;
413 int i, targets_added;
415 if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
416 goto nla_put_failure;
419 nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
420 goto nla_put_failure;
422 if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
423 goto nla_put_failure;
425 if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
426 bond->params.updelay * bond->params.miimon))
427 goto nla_put_failure;
429 if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
430 bond->params.downdelay * bond->params.miimon))
431 goto nla_put_failure;
433 if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
434 goto nla_put_failure;
436 if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
437 goto nla_put_failure;
439 targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
441 goto nla_put_failure;
444 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
445 if (bond->params.arp_targets[i]) {
446 nla_put_be32(skb, i, bond->params.arp_targets[i]);
452 nla_nest_end(skb, targets);
454 nla_nest_cancel(skb, targets);
456 if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
457 goto nla_put_failure;
459 if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
460 bond->params.arp_all_targets))
461 goto nla_put_failure;
463 if (bond->primary_slave &&
464 nla_put_u32(skb, IFLA_BOND_PRIMARY,
465 bond->primary_slave->dev->ifindex))
466 goto nla_put_failure;
468 if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
469 bond->params.primary_reselect))
470 goto nla_put_failure;
472 if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
473 bond->params.fail_over_mac))
474 goto nla_put_failure;
476 if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
477 bond->params.xmit_policy))
478 goto nla_put_failure;
480 if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
481 bond->params.resend_igmp))
482 goto nla_put_failure;
484 if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
485 bond->params.num_peer_notif))
486 goto nla_put_failure;
488 if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
489 bond->params.all_slaves_active))
490 goto nla_put_failure;
492 if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
493 bond->params.min_links))
494 goto nla_put_failure;
496 if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
497 bond->params.lp_interval))
498 goto nla_put_failure;
500 packets_per_slave = bond->params.packets_per_slave;
501 if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
503 goto nla_put_failure;
505 if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
506 bond->params.lacp_fast))
507 goto nla_put_failure;
509 if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
510 bond->params.ad_select))
511 goto nla_put_failure;
513 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
516 if (!bond_3ad_get_active_agg_info(bond, &info)) {
519 nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
521 goto nla_put_failure;
523 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
525 goto nla_put_failure;
526 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
528 goto nla_put_failure;
529 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
531 goto nla_put_failure;
532 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
534 goto nla_put_failure;
535 if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
536 sizeof(info.partner_system),
537 &info.partner_system))
538 goto nla_put_failure;
540 nla_nest_end(skb, nest);
550 struct rtnl_link_ops bond_link_ops __read_mostly = {
552 .priv_size = sizeof(struct bonding),
554 .maxtype = IFLA_BOND_MAX,
555 .policy = bond_policy,
556 .validate = bond_validate,
557 .newlink = bond_newlink,
558 .changelink = bond_changelink,
559 .get_size = bond_get_size,
560 .fill_info = bond_fill_info,
561 .get_num_tx_queues = bond_get_num_tx_queues,
562 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
564 .get_slave_size = bond_get_slave_size,
565 .fill_slave_info = bond_fill_slave_info,
568 int __init bond_netlink_init(void)
570 return rtnl_link_register(&bond_link_ops);
573 void bond_netlink_fini(void)
575 rtnl_link_unregister(&bond_link_ops);
578 MODULE_ALIAS_RTNL_LINK("bond");