oweals/openwrt.git: target/linux/generic/backport-4.14/352-v4.18-netfilter-nf_flow_table-rename-nf_flow_table.c-to-nf.patch
1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Fri, 16 Feb 2018 11:08:47 +0100
3 Subject: [PATCH] netfilter: nf_flow_table: rename nf_flow_table.c to
4  nf_flow_table_core.c
5
6 Preparation for adding more code to the same module
7
8 Signed-off-by: Felix Fietkau <nbd@nbd.name>
9 ---
10  rename net/netfilter/{nf_flow_table.c => nf_flow_table_core.c} (100%)
11
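(Note, not part of the upstream patch: the Makefile hunk below converts nf_flow_table into a kbuild composite object. The module is still selected via obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o, and nf_flow_table-objs lists the objects linked into it, so the module name stays stable while the source file is renamed. A minimal sketch of how a follow-up patch could grow that list; nf_flow_table_extra.o is a hypothetical placeholder, not a file introduced by this series:

    obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
    # every object named here is linked into nf_flow_table.o / nf_flow_table.ko
    nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_extra.o
)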
12 --- a/net/netfilter/Makefile
13 +++ b/net/netfilter/Makefile
14 @@ -113,6 +113,8 @@ obj-$(CONFIG_NFT_FWD_NETDEV)        += nft_fwd_
15  
16  # flow table infrastructure
17  obj-$(CONFIG_NF_FLOW_TABLE)    += nf_flow_table.o
18 +nf_flow_table-objs := nf_flow_table_core.o
19 +
20  obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
21  
22  # generic X tables 
23 --- a/net/netfilter/nf_flow_table.c
24 +++ /dev/null
25 @@ -1,462 +0,0 @@
26 -#include <linux/kernel.h>
27 -#include <linux/init.h>
28 -#include <linux/module.h>
29 -#include <linux/netfilter.h>
30 -#include <linux/rhashtable.h>
31 -#include <linux/netdevice.h>
32 -#include <net/ip.h>
33 -#include <net/ip6_route.h>
34 -#include <net/netfilter/nf_tables.h>
35 -#include <net/netfilter/nf_flow_table.h>
36 -#include <net/netfilter/nf_conntrack.h>
37 -#include <net/netfilter/nf_conntrack_core.h>
38 -#include <net/netfilter/nf_conntrack_tuple.h>
39 -
40 -struct flow_offload_entry {
41 -       struct flow_offload     flow;
42 -       struct nf_conn          *ct;
43 -       struct rcu_head         rcu_head;
44 -};
45 -
46 -static void
47 -flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
48 -                     struct nf_flow_route *route,
49 -                     enum flow_offload_tuple_dir dir)
50 -{
51 -       struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
52 -       struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
53 -       struct dst_entry *dst = route->tuple[dir].dst;
54 -
55 -       ft->dir = dir;
56 -
57 -       switch (ctt->src.l3num) {
58 -       case NFPROTO_IPV4:
59 -               ft->src_v4 = ctt->src.u3.in;
60 -               ft->dst_v4 = ctt->dst.u3.in;
61 -               ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
62 -               break;
63 -       case NFPROTO_IPV6:
64 -               ft->src_v6 = ctt->src.u3.in6;
65 -               ft->dst_v6 = ctt->dst.u3.in6;
66 -               ft->mtu = ip6_dst_mtu_forward(dst);
67 -               break;
68 -       }
69 -
70 -       ft->l3proto = ctt->src.l3num;
71 -       ft->l4proto = ctt->dst.protonum;
72 -       ft->src_port = ctt->src.u.tcp.port;
73 -       ft->dst_port = ctt->dst.u.tcp.port;
74 -
75 -       ft->iifidx = route->tuple[dir].ifindex;
76 -       ft->oifidx = route->tuple[!dir].ifindex;
77 -       ft->dst_cache = dst;
78 -}
79 -
80 -struct flow_offload *
81 -flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
82 -{
83 -       struct flow_offload_entry *entry;
84 -       struct flow_offload *flow;
85 -
86 -       if (unlikely(nf_ct_is_dying(ct) ||
87 -           !atomic_inc_not_zero(&ct->ct_general.use)))
88 -               return NULL;
89 -
90 -       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
91 -       if (!entry)
92 -               goto err_ct_refcnt;
93 -
94 -       flow = &entry->flow;
95 -
96 -       if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
97 -               goto err_dst_cache_original;
98 -
99 -       if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
100 -               goto err_dst_cache_reply;
101 -
102 -       entry->ct = ct;
103 -
104 -       flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
105 -       flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);
106 -
107 -       if (ct->status & IPS_SRC_NAT)
108 -               flow->flags |= FLOW_OFFLOAD_SNAT;
109 -       else if (ct->status & IPS_DST_NAT)
110 -               flow->flags |= FLOW_OFFLOAD_DNAT;
111 -
112 -       return flow;
113 -
114 -err_dst_cache_reply:
115 -       dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
116 -err_dst_cache_original:
117 -       kfree(entry);
118 -err_ct_refcnt:
119 -       nf_ct_put(ct);
120 -
121 -       return NULL;
122 -}
123 -EXPORT_SYMBOL_GPL(flow_offload_alloc);
124 -
125 -void flow_offload_free(struct flow_offload *flow)
126 -{
127 -       struct flow_offload_entry *e;
128 -
129 -       dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
130 -       dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
131 -       e = container_of(flow, struct flow_offload_entry, flow);
132 -       nf_ct_delete(e->ct, 0, 0);
133 -       nf_ct_put(e->ct);
134 -       kfree_rcu(e, rcu_head);
135 -}
136 -EXPORT_SYMBOL_GPL(flow_offload_free);
137 -
138 -void flow_offload_dead(struct flow_offload *flow)
139 -{
140 -       flow->flags |= FLOW_OFFLOAD_DYING;
141 -}
142 -EXPORT_SYMBOL_GPL(flow_offload_dead);
143 -
144 -int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
145 -{
146 -       flow->timeout = (u32)jiffies;
147 -
148 -       rhashtable_insert_fast(&flow_table->rhashtable,
149 -                              &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
150 -                              *flow_table->type->params);
151 -       rhashtable_insert_fast(&flow_table->rhashtable,
152 -                              &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
153 -                              *flow_table->type->params);
154 -       return 0;
155 -}
156 -EXPORT_SYMBOL_GPL(flow_offload_add);
157 -
158 -static void flow_offload_del(struct nf_flowtable *flow_table,
159 -                            struct flow_offload *flow)
160 -{
161 -       rhashtable_remove_fast(&flow_table->rhashtable,
162 -                              &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
163 -                              *flow_table->type->params);
164 -       rhashtable_remove_fast(&flow_table->rhashtable,
165 -                              &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
166 -                              *flow_table->type->params);
167 -
168 -       flow_offload_free(flow);
169 -}
170 -
171 -struct flow_offload_tuple_rhash *
172 -flow_offload_lookup(struct nf_flowtable *flow_table,
173 -                   struct flow_offload_tuple *tuple)
174 -{
175 -       return rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
176 -                                     *flow_table->type->params);
177 -}
178 -EXPORT_SYMBOL_GPL(flow_offload_lookup);
179 -
180 -int nf_flow_table_iterate(struct nf_flowtable *flow_table,
181 -                         void (*iter)(struct flow_offload *flow, void *data),
182 -                         void *data)
183 -{
184 -       struct flow_offload_tuple_rhash *tuplehash;
185 -       struct rhashtable_iter hti;
186 -       struct flow_offload *flow;
187 -       int err;
188 -
189 -       err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
190 -       if (err)
191 -               return err;
192 -
193 -       rhashtable_walk_start(&hti);
194 -
195 -       while ((tuplehash = rhashtable_walk_next(&hti))) {
196 -               if (IS_ERR(tuplehash)) {
197 -                       err = PTR_ERR(tuplehash);
198 -                       if (err != -EAGAIN)
199 -                               goto out;
200 -
201 -                       continue;
202 -               }
203 -               if (tuplehash->tuple.dir)
204 -                       continue;
205 -
206 -               flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
207 -
208 -               iter(flow, data);
209 -       }
210 -out:
211 -       rhashtable_walk_stop(&hti);
212 -       rhashtable_walk_exit(&hti);
213 -
214 -       return err;
215 -}
216 -EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
217 -
218 -static inline bool nf_flow_has_expired(const struct flow_offload *flow)
219 -{
220 -       return (__s32)(flow->timeout - (u32)jiffies) <= 0;
221 -}
222 -
223 -static inline bool nf_flow_is_dying(const struct flow_offload *flow)
224 -{
225 -       return flow->flags & FLOW_OFFLOAD_DYING;
226 -}
227 -
228 -static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
229 -{
230 -       struct flow_offload_tuple_rhash *tuplehash;
231 -       struct rhashtable_iter hti;
232 -       struct flow_offload *flow;
233 -       int err;
234 -
235 -       err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
236 -       if (err)
237 -               return 0;
238 -
239 -       rhashtable_walk_start(&hti);
240 -
241 -       while ((tuplehash = rhashtable_walk_next(&hti))) {
242 -               if (IS_ERR(tuplehash)) {
243 -                       err = PTR_ERR(tuplehash);
244 -                       if (err != -EAGAIN)
245 -                               goto out;
246 -
247 -                       continue;
248 -               }
249 -               if (tuplehash->tuple.dir)
250 -                       continue;
251 -
252 -               flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
253 -
254 -               if (nf_flow_has_expired(flow) ||
255 -                   nf_flow_is_dying(flow))
256 -                       flow_offload_del(flow_table, flow);
257 -       }
258 -out:
259 -       rhashtable_walk_stop(&hti);
260 -       rhashtable_walk_exit(&hti);
261 -
262 -       return 1;
263 -}
264 -
265 -void nf_flow_offload_work_gc(struct work_struct *work)
266 -{
267 -       struct nf_flowtable *flow_table;
268 -
269 -       flow_table = container_of(work, struct nf_flowtable, gc_work.work);
270 -       nf_flow_offload_gc_step(flow_table);
271 -       queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
272 -}
273 -EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
274 -
275 -static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
276 -{
277 -       const struct flow_offload_tuple *tuple = data;
278 -
279 -       return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
280 -}
281 -
282 -static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
283 -{
284 -       const struct flow_offload_tuple_rhash *tuplehash = data;
285 -
286 -       return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
287 -}
288 -
289 -static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
290 -                                       const void *ptr)
291 -{
292 -       const struct flow_offload_tuple *tuple = arg->key;
293 -       const struct flow_offload_tuple_rhash *x = ptr;
294 -
295 -       if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
296 -               return 1;
297 -
298 -       return 0;
299 -}
300 -
301 -const struct rhashtable_params nf_flow_offload_rhash_params = {
302 -       .head_offset            = offsetof(struct flow_offload_tuple_rhash, node),
303 -       .hashfn                 = flow_offload_hash,
304 -       .obj_hashfn             = flow_offload_hash_obj,
305 -       .obj_cmpfn              = flow_offload_hash_cmp,
306 -       .automatic_shrinking    = true,
307 -};
308 -EXPORT_SYMBOL_GPL(nf_flow_offload_rhash_params);
309 -
310 -static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
311 -                               __be16 port, __be16 new_port)
312 -{
313 -       struct tcphdr *tcph;
314 -
315 -       if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
316 -           skb_try_make_writable(skb, thoff + sizeof(*tcph)))
317 -               return -1;
318 -
319 -       tcph = (void *)(skb_network_header(skb) + thoff);
320 -       inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
321 -
322 -       return 0;
323 -}
324 -
325 -static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
326 -                               __be16 port, __be16 new_port)
327 -{
328 -       struct udphdr *udph;
329 -
330 -       if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
331 -           skb_try_make_writable(skb, thoff + sizeof(*udph)))
332 -               return -1;
333 -
334 -       udph = (void *)(skb_network_header(skb) + thoff);
335 -       if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
336 -               inet_proto_csum_replace2(&udph->check, skb, port,
337 -                                        new_port, true);
338 -               if (!udph->check)
339 -                       udph->check = CSUM_MANGLED_0;
340 -       }
341 -
342 -       return 0;
343 -}
344 -
345 -static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
346 -                           u8 protocol, __be16 port, __be16 new_port)
347 -{
348 -       switch (protocol) {
349 -       case IPPROTO_TCP:
350 -               if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
351 -                       return NF_DROP;
352 -               break;
353 -       case IPPROTO_UDP:
354 -               if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
355 -                       return NF_DROP;
356 -               break;
357 -       }
358 -
359 -       return 0;
360 -}
361 -
362 -int nf_flow_snat_port(const struct flow_offload *flow,
363 -                     struct sk_buff *skb, unsigned int thoff,
364 -                     u8 protocol, enum flow_offload_tuple_dir dir)
365 -{
366 -       struct flow_ports *hdr;
367 -       __be16 port, new_port;
368 -
369 -       if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
370 -           skb_try_make_writable(skb, thoff + sizeof(*hdr)))
371 -               return -1;
372 -
373 -       hdr = (void *)(skb_network_header(skb) + thoff);
374 -
375 -       switch (dir) {
376 -       case FLOW_OFFLOAD_DIR_ORIGINAL:
377 -               port = hdr->source;
378 -               new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
379 -               hdr->source = new_port;
380 -               break;
381 -       case FLOW_OFFLOAD_DIR_REPLY:
382 -               port = hdr->dest;
383 -               new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
384 -               hdr->dest = new_port;
385 -               break;
386 -       default:
387 -               return -1;
388 -       }
389 -
390 -       return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
391 -}
392 -EXPORT_SYMBOL_GPL(nf_flow_snat_port);
393 -
394 -int nf_flow_dnat_port(const struct flow_offload *flow,
395 -                     struct sk_buff *skb, unsigned int thoff,
396 -                     u8 protocol, enum flow_offload_tuple_dir dir)
397 -{
398 -       struct flow_ports *hdr;
399 -       __be16 port, new_port;
400 -
401 -       if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
402 -           skb_try_make_writable(skb, thoff + sizeof(*hdr)))
403 -               return -1;
404 -
405 -       hdr = (void *)(skb_network_header(skb) + thoff);
406 -
407 -       switch (dir) {
408 -       case FLOW_OFFLOAD_DIR_ORIGINAL:
409 -               port = hdr->dest;
410 -               new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
411 -               hdr->dest = new_port;
412 -               break;
413 -       case FLOW_OFFLOAD_DIR_REPLY:
414 -               port = hdr->source;
415 -               new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
416 -               hdr->source = new_port;
417 -               break;
418 -       default:
419 -               return -1;
420 -       }
421 -
422 -       return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
423 -}
424 -EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
425 -
426 -static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
427 -{
428 -       struct net_device *dev = data;
429 -
430 -       if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
431 -               return;
432 -
433 -       flow_offload_dead(flow);
434 -}
435 -
436 -static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
437 -                                         void *data)
438 -{
439 -       nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, data);
440 -       flush_delayed_work(&flowtable->gc_work);
441 -}
442 -
443 -void nf_flow_table_cleanup(struct net *net, struct net_device *dev)
444 -{
445 -       nft_flow_table_iterate(net, nf_flow_table_iterate_cleanup, dev);
446 -}
447 -EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
448 -
449 -void nf_flow_table_free(struct nf_flowtable *flow_table)
450 -{
451 -       nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
452 -       WARN_ON(!nf_flow_offload_gc_step(flow_table));
453 -}
454 -EXPORT_SYMBOL_GPL(nf_flow_table_free);
455 -
456 -static int nf_flow_table_netdev_event(struct notifier_block *this,
457 -                                     unsigned long event, void *ptr)
458 -{
459 -       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
460 -
461 -       if (event != NETDEV_DOWN)
462 -               return NOTIFY_DONE;
463 -
464 -       nf_flow_table_cleanup(dev_net(dev), dev);
465 -
466 -       return NOTIFY_DONE;
467 -}
468 -
469 -static struct notifier_block flow_offload_netdev_notifier = {
470 -       .notifier_call  = nf_flow_table_netdev_event,
471 -};
472 -
473 -static int __init nf_flow_table_module_init(void)
474 -{
475 -       return register_netdevice_notifier(&flow_offload_netdev_notifier);
476 -}
477 -
478 -static void __exit nf_flow_table_module_exit(void)
479 -{
480 -       unregister_netdevice_notifier(&flow_offload_netdev_notifier);
481 -}
482 -
483 -module_init(nf_flow_table_module_init);
484 -module_exit(nf_flow_table_module_exit);
485 -
486 -MODULE_LICENSE("GPL");
487 -MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
488 --- /dev/null
489 +++ b/net/netfilter/nf_flow_table_core.c
490 @@ -0,0 +1,462 @@
491 +#include <linux/kernel.h>
492 +#include <linux/init.h>
493 +#include <linux/module.h>
494 +#include <linux/netfilter.h>
495 +#include <linux/rhashtable.h>
496 +#include <linux/netdevice.h>
497 +#include <net/ip.h>
498 +#include <net/ip6_route.h>
499 +#include <net/netfilter/nf_tables.h>
500 +#include <net/netfilter/nf_flow_table.h>
501 +#include <net/netfilter/nf_conntrack.h>
502 +#include <net/netfilter/nf_conntrack_core.h>
503 +#include <net/netfilter/nf_conntrack_tuple.h>
504 +
505 +struct flow_offload_entry {
506 +       struct flow_offload     flow;
507 +       struct nf_conn          *ct;
508 +       struct rcu_head         rcu_head;
509 +};
510 +
511 +static void
512 +flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
513 +                     struct nf_flow_route *route,
514 +                     enum flow_offload_tuple_dir dir)
515 +{
516 +       struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
517 +       struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
518 +       struct dst_entry *dst = route->tuple[dir].dst;
519 +
520 +       ft->dir = dir;
521 +
522 +       switch (ctt->src.l3num) {
523 +       case NFPROTO_IPV4:
524 +               ft->src_v4 = ctt->src.u3.in;
525 +               ft->dst_v4 = ctt->dst.u3.in;
526 +               ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
527 +               break;
528 +       case NFPROTO_IPV6:
529 +               ft->src_v6 = ctt->src.u3.in6;
530 +               ft->dst_v6 = ctt->dst.u3.in6;
531 +               ft->mtu = ip6_dst_mtu_forward(dst);
532 +               break;
533 +       }
534 +
535 +       ft->l3proto = ctt->src.l3num;
536 +       ft->l4proto = ctt->dst.protonum;
537 +       ft->src_port = ctt->src.u.tcp.port;
538 +       ft->dst_port = ctt->dst.u.tcp.port;
539 +
540 +       ft->iifidx = route->tuple[dir].ifindex;
541 +       ft->oifidx = route->tuple[!dir].ifindex;
542 +       ft->dst_cache = dst;
543 +}
544 +
545 +struct flow_offload *
546 +flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
547 +{
548 +       struct flow_offload_entry *entry;
549 +       struct flow_offload *flow;
550 +
551 +       if (unlikely(nf_ct_is_dying(ct) ||
552 +           !atomic_inc_not_zero(&ct->ct_general.use)))
553 +               return NULL;
554 +
555 +       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
556 +       if (!entry)
557 +               goto err_ct_refcnt;
558 +
559 +       flow = &entry->flow;
560 +
561 +       if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
562 +               goto err_dst_cache_original;
563 +
564 +       if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
565 +               goto err_dst_cache_reply;
566 +
567 +       entry->ct = ct;
568 +
569 +       flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
570 +       flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);
571 +
572 +       if (ct->status & IPS_SRC_NAT)
573 +               flow->flags |= FLOW_OFFLOAD_SNAT;
574 +       else if (ct->status & IPS_DST_NAT)
575 +               flow->flags |= FLOW_OFFLOAD_DNAT;
576 +
577 +       return flow;
578 +
579 +err_dst_cache_reply:
580 +       dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
581 +err_dst_cache_original:
582 +       kfree(entry);
583 +err_ct_refcnt:
584 +       nf_ct_put(ct);
585 +
586 +       return NULL;
587 +}
588 +EXPORT_SYMBOL_GPL(flow_offload_alloc);
589 +
590 +void flow_offload_free(struct flow_offload *flow)
591 +{
592 +       struct flow_offload_entry *e;
593 +
594 +       dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
595 +       dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
596 +       e = container_of(flow, struct flow_offload_entry, flow);
597 +       nf_ct_delete(e->ct, 0, 0);
598 +       nf_ct_put(e->ct);
599 +       kfree_rcu(e, rcu_head);
600 +}
601 +EXPORT_SYMBOL_GPL(flow_offload_free);
602 +
603 +void flow_offload_dead(struct flow_offload *flow)
604 +{
605 +       flow->flags |= FLOW_OFFLOAD_DYING;
606 +}
607 +EXPORT_SYMBOL_GPL(flow_offload_dead);
608 +
609 +int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
610 +{
611 +       flow->timeout = (u32)jiffies;
612 +
613 +       rhashtable_insert_fast(&flow_table->rhashtable,
614 +                              &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
615 +                              *flow_table->type->params);
616 +       rhashtable_insert_fast(&flow_table->rhashtable,
617 +                              &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
618 +                              *flow_table->type->params);
619 +       return 0;
620 +}
621 +EXPORT_SYMBOL_GPL(flow_offload_add);
622 +
623 +static void flow_offload_del(struct nf_flowtable *flow_table,
624 +                            struct flow_offload *flow)
625 +{
626 +       rhashtable_remove_fast(&flow_table->rhashtable,
627 +                              &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
628 +                              *flow_table->type->params);
629 +       rhashtable_remove_fast(&flow_table->rhashtable,
630 +                              &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
631 +                              *flow_table->type->params);
632 +
633 +       flow_offload_free(flow);
634 +}
635 +
636 +struct flow_offload_tuple_rhash *
637 +flow_offload_lookup(struct nf_flowtable *flow_table,
638 +                   struct flow_offload_tuple *tuple)
639 +{
640 +       return rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
641 +                                     *flow_table->type->params);
642 +}
643 +EXPORT_SYMBOL_GPL(flow_offload_lookup);
644 +
645 +int nf_flow_table_iterate(struct nf_flowtable *flow_table,
646 +                         void (*iter)(struct flow_offload *flow, void *data),
647 +                         void *data)
648 +{
649 +       struct flow_offload_tuple_rhash *tuplehash;
650 +       struct rhashtable_iter hti;
651 +       struct flow_offload *flow;
652 +       int err;
653 +
654 +       err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
655 +       if (err)
656 +               return err;
657 +
658 +       rhashtable_walk_start(&hti);
659 +
660 +       while ((tuplehash = rhashtable_walk_next(&hti))) {
661 +               if (IS_ERR(tuplehash)) {
662 +                       err = PTR_ERR(tuplehash);
663 +                       if (err != -EAGAIN)
664 +                               goto out;
665 +
666 +                       continue;
667 +               }
668 +               if (tuplehash->tuple.dir)
669 +                       continue;
670 +
671 +               flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
672 +
673 +               iter(flow, data);
674 +       }
675 +out:
676 +       rhashtable_walk_stop(&hti);
677 +       rhashtable_walk_exit(&hti);
678 +
679 +       return err;
680 +}
681 +EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
682 +
683 +static inline bool nf_flow_has_expired(const struct flow_offload *flow)
684 +{
685 +       return (__s32)(flow->timeout - (u32)jiffies) <= 0;
686 +}
687 +
688 +static inline bool nf_flow_is_dying(const struct flow_offload *flow)
689 +{
690 +       return flow->flags & FLOW_OFFLOAD_DYING;
691 +}
692 +
693 +static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
694 +{
695 +       struct flow_offload_tuple_rhash *tuplehash;
696 +       struct rhashtable_iter hti;
697 +       struct flow_offload *flow;
698 +       int err;
699 +
700 +       err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
701 +       if (err)
702 +               return 0;
703 +
704 +       rhashtable_walk_start(&hti);
705 +
706 +       while ((tuplehash = rhashtable_walk_next(&hti))) {
707 +               if (IS_ERR(tuplehash)) {
708 +                       err = PTR_ERR(tuplehash);
709 +                       if (err != -EAGAIN)
710 +                               goto out;
711 +
712 +                       continue;
713 +               }
714 +               if (tuplehash->tuple.dir)
715 +                       continue;
716 +
717 +               flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
718 +
719 +               if (nf_flow_has_expired(flow) ||
720 +                   nf_flow_is_dying(flow))
721 +                       flow_offload_del(flow_table, flow);
722 +       }
723 +out:
724 +       rhashtable_walk_stop(&hti);
725 +       rhashtable_walk_exit(&hti);
726 +
727 +       return 1;
728 +}
729 +
730 +void nf_flow_offload_work_gc(struct work_struct *work)
731 +{
732 +       struct nf_flowtable *flow_table;
733 +
734 +       flow_table = container_of(work, struct nf_flowtable, gc_work.work);
735 +       nf_flow_offload_gc_step(flow_table);
736 +       queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
737 +}
738 +EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
739 +
740 +static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
741 +{
742 +       const struct flow_offload_tuple *tuple = data;
743 +
744 +       return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
745 +}
746 +
747 +static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
748 +{
749 +       const struct flow_offload_tuple_rhash *tuplehash = data;
750 +
751 +       return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
752 +}
753 +
754 +static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
755 +                                       const void *ptr)
756 +{
757 +       const struct flow_offload_tuple *tuple = arg->key;
758 +       const struct flow_offload_tuple_rhash *x = ptr;
759 +
760 +       if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
761 +               return 1;
762 +
763 +       return 0;
764 +}
765 +
766 +const struct rhashtable_params nf_flow_offload_rhash_params = {
767 +       .head_offset            = offsetof(struct flow_offload_tuple_rhash, node),
768 +       .hashfn                 = flow_offload_hash,
769 +       .obj_hashfn             = flow_offload_hash_obj,
770 +       .obj_cmpfn              = flow_offload_hash_cmp,
771 +       .automatic_shrinking    = true,
772 +};
773 +EXPORT_SYMBOL_GPL(nf_flow_offload_rhash_params);
774 +
775 +static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
776 +                               __be16 port, __be16 new_port)
777 +{
778 +       struct tcphdr *tcph;
779 +
780 +       if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
781 +           skb_try_make_writable(skb, thoff + sizeof(*tcph)))
782 +               return -1;
783 +
784 +       tcph = (void *)(skb_network_header(skb) + thoff);
785 +       inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
786 +
787 +       return 0;
788 +}
789 +
790 +static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
791 +                               __be16 port, __be16 new_port)
792 +{
793 +       struct udphdr *udph;
794 +
795 +       if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
796 +           skb_try_make_writable(skb, thoff + sizeof(*udph)))
797 +               return -1;
798 +
799 +       udph = (void *)(skb_network_header(skb) + thoff);
800 +       if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
801 +               inet_proto_csum_replace2(&udph->check, skb, port,
802 +                                        new_port, true);
803 +               if (!udph->check)
804 +                       udph->check = CSUM_MANGLED_0;
805 +       }
806 +
807 +       return 0;
808 +}
809 +
810 +static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
811 +                           u8 protocol, __be16 port, __be16 new_port)
812 +{
813 +       switch (protocol) {
814 +       case IPPROTO_TCP:
815 +               if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
816 +                       return NF_DROP;
817 +               break;
818 +       case IPPROTO_UDP:
819 +               if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
820 +                       return NF_DROP;
821 +               break;
822 +       }
823 +
824 +       return 0;
825 +}
826 +
827 +int nf_flow_snat_port(const struct flow_offload *flow,
828 +                     struct sk_buff *skb, unsigned int thoff,
829 +                     u8 protocol, enum flow_offload_tuple_dir dir)
830 +{
831 +       struct flow_ports *hdr;
832 +       __be16 port, new_port;
833 +
834 +       if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
835 +           skb_try_make_writable(skb, thoff + sizeof(*hdr)))
836 +               return -1;
837 +
838 +       hdr = (void *)(skb_network_header(skb) + thoff);
839 +
840 +       switch (dir) {
841 +       case FLOW_OFFLOAD_DIR_ORIGINAL:
842 +               port = hdr->source;
843 +               new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
844 +               hdr->source = new_port;
845 +               break;
846 +       case FLOW_OFFLOAD_DIR_REPLY:
847 +               port = hdr->dest;
848 +               new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
849 +               hdr->dest = new_port;
850 +               break;
851 +       default:
852 +               return -1;
853 +       }
854 +
855 +       return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
856 +}
857 +EXPORT_SYMBOL_GPL(nf_flow_snat_port);
858 +
859 +int nf_flow_dnat_port(const struct flow_offload *flow,
860 +                     struct sk_buff *skb, unsigned int thoff,
861 +                     u8 protocol, enum flow_offload_tuple_dir dir)
862 +{
863 +       struct flow_ports *hdr;
864 +       __be16 port, new_port;
865 +
866 +       if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
867 +           skb_try_make_writable(skb, thoff + sizeof(*hdr)))
868 +               return -1;
869 +
870 +       hdr = (void *)(skb_network_header(skb) + thoff);
871 +
872 +       switch (dir) {
873 +       case FLOW_OFFLOAD_DIR_ORIGINAL:
874 +               port = hdr->dest;
875 +               new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
876 +               hdr->dest = new_port;
877 +               break;
878 +       case FLOW_OFFLOAD_DIR_REPLY:
879 +               port = hdr->source;
880 +               new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
881 +               hdr->source = new_port;
882 +               break;
883 +       default:
884 +               return -1;
885 +       }
886 +
887 +       return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
888 +}
889 +EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
890 +
891 +static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
892 +{
893 +       struct net_device *dev = data;
894 +
895 +       if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
896 +               return;
897 +
898 +       flow_offload_dead(flow);
899 +}
900 +
901 +static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
902 +                                         void *data)
903 +{
904 +       nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, data);
905 +       flush_delayed_work(&flowtable->gc_work);
906 +}
907 +
908 +void nf_flow_table_cleanup(struct net *net, struct net_device *dev)
909 +{
910 +       nft_flow_table_iterate(net, nf_flow_table_iterate_cleanup, dev);
911 +}
912 +EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
913 +
914 +void nf_flow_table_free(struct nf_flowtable *flow_table)
915 +{
916 +       nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
917 +       WARN_ON(!nf_flow_offload_gc_step(flow_table));
918 +}
919 +EXPORT_SYMBOL_GPL(nf_flow_table_free);
920 +
921 +static int nf_flow_table_netdev_event(struct notifier_block *this,
922 +                                     unsigned long event, void *ptr)
923 +{
924 +       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
925 +
926 +       if (event != NETDEV_DOWN)
927 +               return NOTIFY_DONE;
928 +
929 +       nf_flow_table_cleanup(dev_net(dev), dev);
930 +
931 +       return NOTIFY_DONE;
932 +}
933 +
934 +static struct notifier_block flow_offload_netdev_notifier = {
935 +       .notifier_call  = nf_flow_table_netdev_event,
936 +};
937 +
938 +static int __init nf_flow_table_module_init(void)
939 +{
940 +       return register_netdevice_notifier(&flow_offload_netdev_notifier);
941 +}
942 +
943 +static void __exit nf_flow_table_module_exit(void)
944 +{
945 +       unregister_netdevice_notifier(&flow_offload_netdev_notifier);
946 +}
947 +
948 +module_init(nf_flow_table_module_init);
949 +module_exit(nf_flow_table_module_exit);
950 +
951 +MODULE_LICENSE("GPL");
952 +MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");