From: Oz Shlomo <ozsh@nvidia.com>
Date: Tue, 23 Mar 2021 00:56:19 +0100
Subject: [PATCH] netfilter: flowtable: separate replace, destroy and
 stats to different workqueues

Currently the flow table offload replace, destroy and stats work items are
executed on a single workqueue. As such, DESTROY and STATS commands may
be backlogged after a burst of REPLACE work items. This scenario can bloat
memory, since backlogged DESTROY items keep dead flows allocated, and may
cause active connections to age out, since backlogged STATS items leave
their timeouts unrefreshed.

Instantiate separate add, del and stats workqueues to avoid backlogs of
independent actions. Provide sysfs control over the workqueue attributes,
allowing userspace applications to control the workqueue cpumask (see the
usage sketch after the trailer below).

Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---

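Note: with WQ_SYSFS set, the three queues should appear under
/sys/devices/virtual/workqueue/<name>/. A minimal userspace sketch for
pinning the add queue to CPUs 0-1, assuming that standard sysfs layout
(illustrative only, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Workqueue name matches the one chosen by this patch; the
         * cpumask attribute takes a hex CPU bitmap ("3" = CPU0|CPU1). */
        const char *path =
                "/sys/devices/virtual/workqueue/nf_ft_offload_add/cpumask";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        if (fprintf(f, "3\n") < 0) {
                perror("fprintf");
                fclose(f);
                return EXIT_FAILURE;
        }
        return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}
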
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -13,7 +13,9 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 
-static struct workqueue_struct *nf_flow_offload_wq;
+static struct workqueue_struct *nf_flow_offload_add_wq;
+static struct workqueue_struct *nf_flow_offload_del_wq;
+static struct workqueue_struct *nf_flow_offload_stats_wq;
 
 struct flow_offload_work {
        struct list_head        list;
@@ -827,7 +829,12 @@ static void flow_offload_work_handler(st
 
 static void flow_offload_queue_work(struct flow_offload_work *offload)
 {
-       queue_work(nf_flow_offload_wq, &offload->work);
+       if (offload->cmd == FLOW_CLS_REPLACE)
+               queue_work(nf_flow_offload_add_wq, &offload->work);
+       else if (offload->cmd == FLOW_CLS_DESTROY)
+               queue_work(nf_flow_offload_del_wq, &offload->work);
+       else
+               queue_work(nf_flow_offload_stats_wq, &offload->work);
 }
 
 static struct flow_offload_work *
@@ -899,8 +906,11 @@ void nf_flow_offload_stats(struct nf_flo
 
 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
 {
-       if (nf_flowtable_hw_offload(flowtable))
-               flush_workqueue(nf_flow_offload_wq);
+       if (nf_flowtable_hw_offload(flowtable)) {
+               flush_workqueue(nf_flow_offload_add_wq);
+               flush_workqueue(nf_flow_offload_del_wq);
+               flush_workqueue(nf_flow_offload_stats_wq);
+       }
 }
 
 static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
@@ -1013,15 +1023,33 @@ EXPORT_SYMBOL_GPL(nf_flow_table_offload_
 
 int nf_flow_table_offload_init(void)
 {
-       nf_flow_offload_wq  = alloc_workqueue("nf_flow_table_offload",
-                                             WQ_UNBOUND, 0);
-       if (!nf_flow_offload_wq)
+       nf_flow_offload_add_wq  = alloc_workqueue("nf_ft_offload_add",
+                                                 WQ_UNBOUND | WQ_SYSFS, 0);
+       if (!nf_flow_offload_add_wq)
                return -ENOMEM;
 
+       nf_flow_offload_del_wq  = alloc_workqueue("nf_ft_offload_del",
+                                                 WQ_UNBOUND | WQ_SYSFS, 0);
+       if (!nf_flow_offload_del_wq)
+               goto err_del_wq;
+
+       nf_flow_offload_stats_wq  = alloc_workqueue("nf_ft_offload_stats",
+                                                   WQ_UNBOUND | WQ_SYSFS, 0);
+       if (!nf_flow_offload_stats_wq)
+               goto err_stats_wq;
+
        return 0;
+
+err_stats_wq:
+       destroy_workqueue(nf_flow_offload_del_wq);
+err_del_wq:
+       destroy_workqueue(nf_flow_offload_add_wq);
+       return -ENOMEM;
 }
 
 void nf_flow_table_offload_exit(void)
 {
-       destroy_workqueue(nf_flow_offload_wq);
+       destroy_workqueue(nf_flow_offload_add_wq);
+       destroy_workqueue(nf_flow_offload_del_wq);
+       destroy_workqueue(nf_flow_offload_stats_wq);
 }
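
For reference, a minimal out-of-tree module sketch of the workqueue API the
patch builds on (alloc_workqueue(), queue_work(), destroy_workqueue());
names are illustrative and the module is not part of the patch:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
        pr_info("work item ran on an unbound, sysfs-visible workqueue\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
        /* WQ_SYSFS exposes the queue under
         * /sys/devices/virtual/workqueue/demo_wq/ */
        demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND | WQ_SYSFS, 0);
        if (!demo_wq)
                return -ENOMEM;
        queue_work(demo_wq, &demo_work);
        return 0;
}

static void __exit demo_exit(void)
{
        /* destroy_workqueue() drains pending work before freeing */
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");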