/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define XT_PCPU_BLOCK_SIZE 4096
#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit userland */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);
void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);
int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);
int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);
int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);
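/* Usage sketch (illustrative, not part of this file): an extension module
 * typically wraps its matches in an array and registers them at init time.
 * The foo_* names below are hypothetical.
 *
 *	static struct xt_match foo_mt_reg[] __read_mostly = {
 *		{
 *			.name      = "foo",
 *			.revision  = 0,
 *			.family    = NFPROTO_IPV4,
 *			.match     = foo_mt,
 *			.matchsize = sizeof(struct xt_foo_mtinfo),
 *			.me        = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 */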
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
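/* Note on autoloading: with the "%st_%s" format and the xt_prefix[] table
 * above, a request for match "state" resolves to module "ipt_state" for
 * NFPROTO_IPV4, "ip6t_state" for NFPROTO_IPV6, "xt_state" for
 * NFPROTO_UNSPEC, and so on; the concrete aliases are declared by the
 * individual extension modules via MODULE_ALIAS().
 */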
/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
static int xt_obj_to_user(u16 __user *psize, u16 size,
			  void __user *pname, const char *name,
			  u8 __user *prev, u8 rev)
{
	if (put_user(size, psize))
		return -EFAULT;
	if (copy_to_user(pname, name, strlen(name) + 1))
		return -EFAULT;
	if (put_user(rev, prev))
		return -EFAULT;

	return 0;
}

#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
		       U->u.user.name, K->u.kernel.TYPE->name,		\
		       &U->u.user.revision, K->u.kernel.TYPE->revision)
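/* For illustration, XT_OBJ_TO_USER(u, m, match, 0) expands (roughly) to
 *
 *	xt_obj_to_user(&u->u.match_size, m->u.match_size,
 *		       u->u.user.name, m->u.kernel.match->name,
 *		       &u->u.user.revision, m->u.kernel.match->revision)
 *
 * i.e. it copies the kernel-side object's size, name and revision into the
 * userspace view of the entry; a non-zero C_SIZE overrides the size copied,
 * which the compat conversion helpers below rely on.
 */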
int xt_data_to_user(void __user *dst, const void *src,
		    int usersize, int size, int aligned_size)
{
	usersize = usersize ? : size;
	if (copy_to_user(dst, src, usersize))
		return -EFAULT;
	if (usersize != aligned_size &&
	    clear_user(dst + usersize, aligned_size - usersize))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(xt_data_to_user);

#define XT_DATA_TO_USER(U, K, TYPE)					\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			K->u.kernel.TYPE->TYPE##size,			\
			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u)
{
	return XT_OBJ_TO_USER(u, m, match, 0) ||
	       XT_DATA_TO_USER(u, m, match);
}
EXPORT_SYMBOL_GPL(xt_match_to_user);

int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u)
{
	return XT_OBJ_TO_USER(u, t, target, 0) ||
	       XT_DATA_TO_USER(u, t, target);
}
EXPORT_SYMBOL_GPL(xt_target_to_user);
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}
/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	mutex_lock(&xt[af].mutex);
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	                                   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}
/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose, it checks that name is NUL terminated and isn't a 'special'
 * name, like "..".
 *
 * returns negative number on error or 0 if name is usable.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (name[0] == '\0')
		return -EINVAL;

	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;

	if (strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0 ||
	    strchr(name, '/'))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);
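/* Usage sketch (hedged): extensions that create /proc entries, e.g. a
 * hashlimit-style match, would validate the user-supplied name from their
 * checkentry hook before creating the file:
 *
 *	ret = xt_check_proc_name(info->name, sizeof(info->name));
 *	if (ret < 0)
 *		return ret;
 *
 * where info->name is a fixed-size buffer copied from userspace.
 */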
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rule's target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
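/* Worked example with illustrative numbers: if target - match == 96 and the
 * entry carries two matches whose u.match_size is 48 each, the loop consumes
 * 48 + 48 bytes and exits with length == 0: the matches end exactly where
 * the target begins.  With sizes 40 + 48 instead, 8 trailing bytes remain,
 * which fail the minimum-size check on the next iteration and the entry is
 * rejected with -EINVAL.
 */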
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (WARN_ON(!xp->compat_tab))
		return -ENOMEM;

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
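/* Worked example (illustrative): if offsets 0, 112 and 272 were added via
 * xt_compat_add_offset() with a per-rule delta of 8 each, compat_tab holds
 * cumulative deltas {8, 16, 24}.  xt_compat_calc_jump(af, 272) then hits
 * index 2 and returns tmp[1].delta == 16, the accumulated size difference
 * of all rules in front of the destination, which callers use to translate
 * rule offsets between the compat and native blob layouts.
 */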
int xt_compat_init_offsets(u8 af, unsigned int number)
{
	size_t mem;

	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
		return -EINVAL;

	if (WARN_ON(xt[af].compat_tab))
		return -EINVAL;

	mem = sizeof(struct compat_delta) * number;
	if (mem > XT_MAX_TABLE_SIZE)
		return -ENOMEM;

	xt[af].compat_tab = vmalloc(mem);
	if (!xt[af].compat_tab)
		return -ENOMEM;

	xt[af].number = number;
	xt[af].cur = 0;

	return 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			C_SIZE,						\
			COMPAT_XT_ALIGN(C_SIZE))

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (XT_OBJ_TO_USER(cm, m, match, msize))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add a compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */
/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata]  ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size, sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
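/* Worked example (illustrative sizes): for a 112-byte ipt_entry followed by
 * one 48-byte match and a standard target, target_offset must be 112 + 48
 * == 160 and next_offset must equal 160 + XT_ALIGN(sizeof(struct
 * xt_standard_target)).  A target_offset below 112, or a target whose
 * claimed u.target_size runs past next_offset, fails the checks above.
 */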
/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
		return NULL;

	return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);
/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offsets array
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
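/* The binary search above assumes @offsets is sorted in ascending order,
 * which holds when callers record rule head offsets in blob order during
 * translation.  E.g. searching for 160 in {0, 112, 160, 320} probes index 2
 * and succeeds immediately; searching for 100 narrows to an empty range and
 * returns false, flagging a jump into the middle of a rule.
 */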
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if the setsockopt call was made by a 32bit task on a 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		user += sizeof(*info);
	}
	info->name[sizeof(info->name) - 1] = '\0';

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
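/* Example of the final length check (illustrative): a replace request with
 * info->num_counters == 3 must supply exactly 3 * sizeof(struct xt_counters)
 * == 48 bytes of counter payload after the header; any other @len yields
 * -EINVAL before the vmalloc takes place.
 */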
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (XT_OBJ_TO_USER(ct, t, target, tsize))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
		return NULL;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	/* __GFP_NORETRY is not fully supported by kvmalloc but it should
	 * work reasonably well if sz is too large and bail out rather
	 * than shoot all processes down before realizing there is nothing
	 * more to reclaim.
	 */
	info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
	if (!info)
		return NULL;

	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
/* Find table by name, grabs mutex & ref.  Returns NULL on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t, *found = NULL;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;

	if (net == &init_net)
		goto out;

	/* Table doesn't exist in this netns, re-try init */
	list_for_each_entry(t, &init_net.xt.tables[af], list) {
		if (strcmp(t->name, name))
			continue;
		if (!try_module_get(t->me)) {
			mutex_unlock(&xt[af].mutex);
			return NULL;
		}

		mutex_unlock(&xt[af].mutex);
		if (t->table_init(net) != 0) {
			module_put(t->me);
			return NULL;
		}

		found = t;

		mutex_lock(&xt[af].mutex);
		break;
	}

	if (!found)
		goto out;

	/* and once again: */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;

	module_put(found->me);
 out:
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = kvzalloc(size, GFP_KERNEL);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
			cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}
struct xt_counters *xt_counters_alloc(unsigned int counters)
{
	struct xt_counters *mem;

	if (counters == 0 || counters > INT_MAX / sizeof(*mem))
		return NULL;

	counters *= sizeof(*mem);
	if (counters > XT_MAX_TABLE_SIZE)
		return NULL;

	return vzalloc(counters);
}
EXPORT_SYMBOL(xt_counters_alloc);
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		audit_log(current->audit_context, GFP_KERNEL,
			  AUDIT_NETFILTER_CFG,
			  "table=%s family=%u entries=%u",
			  table->name, table->af, private->number);
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
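/* Usage sketch (hedged, condensed from the typical __do_replace() flow in
 * the per-family table code):
 *
 *	newinfo = xt_alloc_table_info(size);
 *	...copy and validate the new ruleset into newinfo...
 *	oldinfo = xt_replace_table(table, num_counters, newinfo, &ret);
 *	if (!oldinfo)
 *		goto free_newinfo;	// ret carries the errno
 *	...harvest the counters still held in oldinfo...
 *	xt_free_table_info(oldinfo);
 */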
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}
static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};
static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}
static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;

	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}
static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;

	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */
/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;

	if (!num_hooks)
		return ERR_PTR(-EINVAL);

	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
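/* Usage sketch (hedged): table backends pair this with the netns hook
 * registration helpers; packet_filter and iptable_filter_hook below stand
 * in for a concrete table and its nf_hookfn.
 *
 *	ops = xt_hook_ops_alloc(&packet_filter, iptable_filter_hook);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	...
 *	nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
 */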
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
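/* Example (illustrative): with sizeof(struct xt_counters) == 16, one 4 KiB
 * percpu block satisfies 256 consecutive xt_percpu_counter_alloc() calls
 * from the same state before a fresh block is allocated.  Because blocks
 * are allocated XT_PCPU_BLOCK_SIZE-aligned, only the counter sitting at
 * offset 0 of a block has the low bits of pcnt clear, which is what
 * xt_percpu_counter_free() below tests before calling free_percpu().
 */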
void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};
static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);