#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

#ifdef CONFIG_PAGE_TABLE_ISOLATION
#define USER_MAPPED_SECTION "..user_mapped"
#else
#define USER_MAPPED_SECTION ""
#endif

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)                                               \
        __percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))     \
        PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS                                              \
        __attribute__((section(".discard"), unused))

/*
 * Macro which verifies @ptr is a percpu pointer without evaluating
 * @ptr.  This is to be used in percpu accessors to verify that the
 * input parameter is a percpu pointer.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)  do {                                    \
        const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;    \
        (void)__vpp_verify;                                             \
} while (0)
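
/*
 * Illustrative example only (the variable names below are made up): the
 * "+ 0" makes the check work for per-CPU arrays too, because
 * typeof(arr + 0) is a pointer to the array's element type rather than
 * the array type itself.  Nothing is evaluated at run time; a pointer
 * that is not __percpu simply makes sparse flag the address space
 * mismatch in the assignment above.
 *
 *      DECLARE_PER_CPU(int, hits[8]);
 *      __verify_pcpu_ptr(hits);        // typeof(hits + 0) is int __percpu *
 *
 *      int *p = NULL;
 *      __verify_pcpu_ptr(p);           // sparse: wrong address space
 */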

/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the normally addressable area.
 *
 * This definition places the following two extra restrictions on
 * defining percpu variables:
 *
 * 1. The symbol must be globally unique, even for static variables.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two restrictions,
 * the weak definition is used for all cases if
 * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * The __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger a build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * The __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * so that a hidden weak symbol collision, which would cause unrelated
 * variables to share the same address, can be detected at build time.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)                        \
        extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;             \
        extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)                         \
        __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;                    \
        extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;            \
        __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;                   \
        extern __PCPU_ATTRS(sec) __typeof__(type) name;                 \
        __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak                 \
        __typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)                        \
        extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)                         \
        __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES                        \
        __typeof__(type) name
#endif
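
/*
 * A short sketch of the two restrictions above (names are purely
 * illustrative).  With weak per-CPU definitions, the non-static
 * __pcpu_unique_##name dummy is emitted for every definition, so two
 * translation units both containing the first line below collide at link
 * time even though "foo" itself is static.  The second case fails to
 * compile because the expansion mixes extern declarations and definitions,
 * which is not valid at function scope.
 *
 *      static DEFINE_PER_CPU(int, foo);        // must still be globally unique
 *
 *      void bar(void)
 *      {
 *              static DEFINE_PER_CPU(int, baz);        // not allowed
 *      }
 */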

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)                                     \
        DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)                                      \
        DEFINE_PER_CPU_SECTION(type, name, "")
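
/*
 * Typical usage (illustrative only; the names are made up): define the
 * variable in exactly one .c file, declare it in a header for other users,
 * and go through the accessors from linux/percpu.h to reach a given CPU's
 * copy.
 *
 *      // foo.c
 *      DEFINE_PER_CPU(unsigned int, foo_count);
 *
 *      // foo.h
 *      DECLARE_PER_CPU(unsigned int, foo_count);
 *
 *      // any user
 *      this_cpu_inc(foo_count);
 *      total += per_cpu(foo_count, cpu);
 */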

#define DECLARE_PER_CPU_USER_MAPPED(type, name)                         \
        DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)

#define DEFINE_PER_CPU_USER_MAPPED(type, name)                          \
        DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)                               \
        DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)                                \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)                      \
        DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
        ____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                       \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
        ____cacheline_aligned_in_smp
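
/*
 * Illustrative sketch of the statistics pattern described above (the names
 * are made up): each CPU only updates its own cacheline-aligned copy, and
 * a reader sums the copies over all possible CPUs.
 *
 *      DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, nr_widget_events);
 *
 *      static inline void widget_event(void)
 *      {
 *              this_cpu_inc(nr_widget_events);
 *      }
 *
 *      static unsigned long widget_event_total(void)
 *      {
 *              unsigned long sum = 0;
 *              int cpu;
 *
 *              for_each_possible_cpu(cpu)
 *                      sum += per_cpu(nr_widget_events, cpu);
 *              return sum;
 *      }
 */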

#define DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name)          \
        DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
        ____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name)           \
        DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
        ____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_ALIGNED(type, name)                             \
        DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)    \
        ____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)                              \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)     \
        ____cacheline_aligned

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)                        \
        DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")           \
        __aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                         \
        DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")            \
        __aligned(PAGE_SIZE)

/*
 * Declaration/definition used for per-CPU variables that must be page
 * aligned and need to be mapped in user mode.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)            \
        DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
        __aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)             \
        DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
        __aligned(PAGE_SIZE)
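
/*
 * Illustrative sketch only (the structure and variable names are made up):
 * with CONFIG_PAGE_TABLE_ISOLATION the _USER_MAPPED variants place the
 * variable in the per-CPU area that is also mapped into the user page
 * tables, so per-CPU data that the entry code has to touch before
 * switching to the kernel page tables can be both page aligned and
 * visible under the user CR3.
 *
 *      struct entry_scratch {
 *              unsigned long   sp;
 *              char            buf[PAGE_SIZE - sizeof(unsigned long)];
 *      } __aligned(PAGE_SIZE);
 *
 *      DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct entry_scratch, entry_scratch);
 */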

/*
 * Declaration/definition used for per-CPU variables that are read-mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)                         \
        DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)                          \
        DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")

/*
 * Intermodule exports for per-CPU variables.  sparse forgets about the
 * address space across EXPORT_SYMBOL(), so EXPORT_SYMBOL() is turned into
 * a noop when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif
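
/*
 * Illustrative example (names are made up): a per-CPU variable defined in
 * built-in code must also be exported before a module can reference the
 * symbol it sees via DECLARE_PER_CPU().
 *
 *      // built-in code
 *      DEFINE_PER_CPU(struct widget_state, widget_state);
 *      EXPORT_PER_CPU_SYMBOL_GPL(widget_state);
 *
 *      // module code
 *      DECLARE_PER_CPU(struct widget_state, widget_state);
 *      struct widget_state *ws = this_cpu_ptr(&widget_state);
 */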

#endif /* _LINUX_PERCPU_DEFS_H */