/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

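/* Recirculation and nested sample actions are not executed by direct
 * recursion.  Instead, the inner work is pushed onto the per-CPU
 * 'action_fifos' queue and run once the outermost action list has
 * finished, which keeps kernel stack usage bounded.
 * 'exec_actions_level' counts how deeply ovs_execute_actions() is
 * nested on this CPU so that only the outermost invocation drains the
 * queue.
 */
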
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

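/* Note that this is not a ring buffer: 'head' and 'tail' only ever
 * advance, and both are reset by action_fifo_init() once the queue has
 * drained.  Since action_fifo_put() refuses to advance 'head' past
 * DEFERRED_ACTION_FIFO_SIZE - 1, at most nine deferred actions can be
 * queued over one packet's trip through the pipeline.
 */
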
/* Return a pointer to the new deferred-action entry, or NULL if the
 * fifo is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

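/* make_writable() guarantees that the first 'write_len' bytes of the
 * skb are linear and private to this caller: pskb_may_pull()
 * linearizes the headers, and a cloned skb whose data is still shared
 * with another clone gets its own copy via pskb_expand_head().
 * GFP_ATOMIC is used because this path cannot sleep.
 */
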
/* Remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_len(skb);

	return 0;
}

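/* With CHECKSUM_COMPLETE, skb->csum is a sum over the whole frame
 * starting at skb->data, so the four bytes removed here (the 802.1Q
 * TPID and TCI that follow the two MAC addresses) must be subtracted
 * from it; otherwise the stored checksum would no longer match the
 * shortened packet.
 */
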
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* Move the next VLAN tag (if any) to the hw accel tag. */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
	return 0;
}

static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* Push down current VLAN tag. */
		current_tag = vlan_tx_tag_get(skb);

		skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
						current_tag);
		if (!skb)
			return -ENOMEM;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}

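/* Only one VLAN tag can live in skb metadata (skb->vlan_tci with
 * VLAN_TAG_PRESENT set).  Pushing a second tag therefore first writes
 * the currently-offloaded tag back into the packet data, then parks
 * the new tag in the metadata slot.  VLAN_TAG_PRESENT is masked out
 * because it is an in-kernel flag, not part of the on-wire TCI.
 */
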
static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;

	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	return 0;
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

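/* The TCP and UDP checksums cover a pseudo-header that includes the IP
 * source and destination addresses, which is why rewriting an address
 * also touches the transport checksum (the final '1' tells
 * inet_proto_csum_replace4() the change is pseudo-header material).
 * A recomputed UDP checksum of zero must be stored as CSUM_MANGLED_0
 * (0xffff), since an on-wire zero means "no checksum present".
 */
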
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
	nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
	nh->flow_lbl[2] = fl & 0x000000FF;
}

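/* The 8-bit traffic class and 20-bit flow label straddle byte
 * boundaries in struct ipv6hdr, which is why both helpers above mask
 * and merge around flow_lbl[0]:
 *
 *	priority:4	tc bits 7..4
 *	flow_lbl[0]	tc bits 3..0 | flow-label bits 19..16
 *	flow_lbl[1]	flow-label bits 15..8
 *	flow_lbl[2]	flow-label bits 7..0
 */
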
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));

	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

	if (ipv4_key->ipv4_ttl != nh->ttl)
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

	return 0;
}

static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	int err;
	__be32 *saddr;
	__be32 *daddr;

	err = make_writable(skb, skb_network_offset(skb) +
			    sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;

		/* If a routing extension header is present, the final
		 * destination (which the L4 pseudo-header checksum is
		 * computed over) differs from daddr, so the checksum
		 * need not be recalculated here.
		 */
		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	nh->hop_limit = ipv6_key->ipv6_hlimit;

	return 0;
}

/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_hash(skb);
}

static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_hash(skb);
	}
}

static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source)
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);

	if (udp_port_key->udp_dst != uh->dest)
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

	return 0;
}

static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source)
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

	if (tcp_port_key->tcp_dst != th->dest)
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

	return 0;
}

static int set_sctp(struct sk_buff *skb,
		    const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		skb_clear_hash(skb);
	}

	return 0;
}

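/* The XOR above preserves any pre-existing corruption.  If the packet
 * arrived with a correct CRC32c, then old_csum == old_correct_csum and
 * the stored checksum becomes new_csum, the correct value for the
 * rewritten packet.  If it arrived broken, old_csum differs from
 * old_correct_csum by some error pattern, and that same pattern is
 * XORed into new_csum, so the packet leaves exactly as broken as it
 * came in.
 */
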
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport;

	if (unlikely(!skb))
		return -ENOMEM;

	vport = ovs_vport_rcu(dp, out_port);
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return -ENODEV;
	}

	ovs_vport_send(vport, skb);
	return 0;
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = key;
	upcall.userdata = NULL;
	upcall.portid = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;
		}
	}

	return ovs_dp_upcall(dp, skb, &upcall);
}

static bool last_action(const struct nlattr *a, int rem)
{
	return a->nla_len == rem;
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (prandom_u32() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing. */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   last_action(a, rem)))
		return output_userspace(dp, skb, key, a);

	skb = skb_clone(skb, GFP_ATOMIC);

	/* Skip the sample action when out of memory. */
	if (!skb)
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

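/* OVS_SAMPLE_ATTR_PROBABILITY maps linearly onto the u32 range: the
 * packet is sampled when prandom_u32() falls below the attribute
 * value, so 0 never samples, U32_MAX samples (almost) always, and
 * 0x80000000 samples roughly half of the packets.
 */
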
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

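/* A result of zero is promoted to 0x1, presumably so that an
 * ovs_flow_hash of 0 can still mean "no hash computed".  Mixing in the
 * caller-supplied basis via jhash_1word() lets the same packet yield
 * several independent hash values.
 */
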
static int execute_set_action(struct sk_buff *skb,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		skb->mark = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, nla_data(nested_attr));
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int err;

	err = ovs_flow_key_update(skb, key);
	if (err)
		return err;

	if (!last_action(a, rem)) {
		/* Recirc action is not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

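/* A deferred entry with a NULL 'actions' pointer marks a
 * recirculation: process_deferred_actions() will feed the skb back
 * into ovs_dp_process_packet() with the updated recirc_id in its key,
 * so the packet takes another trip through flow lookup instead of
 * recursing here.
 */
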
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and then
	 * freeing the original skbuff would be wasteful.  The following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (last_action(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}

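/* Example: for the action list "output:1, output:2", the first loop
 * iteration only records prev_port = 1; the second sends a clone to
 * port 1 before recording prev_port = 2; after the loop the original
 * skb itself goes to port 2, so exactly one clone is made instead of
 * two.
 */
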
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

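/* Entries queued while the loop above runs (e.g. a deferred sample
 * whose actions themselves recirculate) are picked up by the same
 * loop, since the while condition re-checks the FIFO rather than a
 * snapshot of its length.
 */
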
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_actions *acts;
	int err;

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	this_cpu_inc(exec_actions_level);
	OVS_CB(skb)->egress_tun_info = NULL;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);
	return err;
}

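/* 'level' is sampled before the increment, so it is zero only for the
 * outermost call on this CPU: recirculated packets re-enter through
 * ovs_dp_process_packet() -> ovs_execute_actions() at level > 0 and
 * leave the FIFO draining to the invocation that started the chain.
 */
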
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}