kernel/events/internal.h (Linux-libre 5.3.12-gnu, librecmc/linux-libre.git)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE            0x01

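/*
 * Backing store for a perf mmap region: a shared control page
 * (user_page, a struct perf_event_mmap_page) followed by nr_pages of
 * data, plus an optional AUX area used by PMUs that write data on
 * their own (e.g. Intel PT).
 */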
struct ring_buffer {
        refcount_t                      refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             overwrite;      /* can overwrite itself */
        int                             paused;         /* can write into ring buffer */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        unsigned int                    nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */
        long                            aux_watermark;
        /* poll bookkeeping: events attached to this buffer */
        spinlock_t                      event_lock;
        struct list_head                event_list;

        atomic_t                        mmap_count;
        unsigned long                   mmap_locked;
        struct user_struct              *mmap_user;

        /* AUX area */
        long                            aux_head;
        unsigned int                    aux_nest;
        long                            aux_wakeup;     /* last aux_watermark boundary crossed by aux_head */
        unsigned long                   aux_pgoff;
        int                             aux_nr_pages;
        int                             aux_overwrite;
        atomic_t                        aux_mmap_count;
        unsigned long                   aux_mmap_locked;
        void                            (*free_aux)(void *);
        refcount_t                      aux_refcount;
        void                            **aux_pages;
        void                            *aux_priv;

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);

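/*
 * RCU callback wrapper around rb_free(), so the buffer is only freed
 * once readers from a previous grace period are done (typically queued
 * with call_rcu()).
 */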
static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
        struct ring_buffer *rb;

        rb = container_of(rcu_head, struct ring_buffer, rcu_head);
        rb_free(rb);
}

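/*
 * Pause or resume output into the buffer.  A buffer without data pages
 * can never be unpaused, so writers always find it stopped.
 */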
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
        if (!pause && rb->nr_pages)
                rb->paused = 0;
        else
                rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                        pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
        return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
                          unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
        return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
        return 0;
}
#endif

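/*
 * Size of the data and AUX areas in bytes.  With CONFIG_PERF_USE_VMALLOC
 * each entry in data_pages[] covers 2^page_order real pages, hence the
 * extra shift in perf_data_size().
 */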
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
        return rb->aux_nr_pages << PAGE_SHIFT;
}

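/*
 * Copy loop shared by the __output_*() helpers below.  memcpy_func is
 * expected to return the number of bytes it could NOT copy (the same
 * convention as copy_from_user()), so 'size - written' is the amount
 * actually written.  When the current data page is exhausted the handle
 * advances to the next one, wrapping modulo nr_pages (a power of two).
 * The expanded function returns the number of bytes left uncopied.
 */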
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)       \
{                                                                       \
        unsigned long size, written;                                    \
                                                                        \
        do {                                                            \
                size    = min(handle->size, len);                       \
                written = memcpy_func(__VA_ARGS__);                     \
                written = size - written;                               \
                                                                        \
                len -= written;                                         \
                handle->addr += written;                                \
                if (advance_buf)                                        \
                        buf += written;                                 \
                handle->size -= written;                                \
                if (!handle->size) {                                    \
                        struct ring_buffer *rb = handle->rb;            \
                                                                        \
                        handle->page++;                                 \
                        handle->page &= rb->nr_pages - 1;               \
                        handle->addr = rb->data_pages[handle->page];    \
                        handle->size = PAGE_SIZE << page_order(rb);     \
                }                                                       \
        } while (len && written == size);                               \
                                                                        \
        return len;                                                     \
}

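/*
 * DEFINE_OUTPUT_COPY(name, copy) generates
 *
 *     static inline unsigned long
 *     name(struct perf_output_handle *handle, const void *buf,
 *          unsigned long len);
 *
 * which copies 'len' bytes from 'buf' into the ring buffer via 'copy'
 * and returns the number of bytes that could not be copied.
 */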
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)                      \
static inline unsigned long                                             \
func_name(struct perf_output_handle *handle,                            \
          const void *buf, unsigned long len)                           \
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

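/*
 * Like the generated helpers, but with a caller-supplied perf_copy_f.
 * The source pointer is not advanced here (advance_buf == false);
 * instead the running offset (orig_len - len) is passed to copy_func,
 * which is expected to index into 'buf' itself.
 */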
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
                const void *buf, unsigned long len)
{
        unsigned long orig_len = len;
        __DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
                                  orig_len - len, size)
}

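/* memcpy() wrapper matching the "return bytes not copied" convention. */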
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
        memcpy(dst, src, n);
        return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

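/* No-op copy: lets __output_skip() advance the handle without writing. */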
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
        return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

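/*
 * Default user-space copy for architectures that do not provide their
 * own: copy with page faults disabled and return the number of bytes
 * that could not be copied.
 */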
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
        unsigned long ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dst, src, n);
        pagefault_enable();

        return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

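/*
 * Claim the recursion counter for the current context (0 = task,
 * 1 = softirq, 2 = hardirq, 3 = NMI).  Returns the context index, or
 * -1 if an event is already being handled in this context, in which
 * case the caller should bail out rather than recurse.
 */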
static inline int get_recursion_context(int *recursion)
{
        int rctx;

        if (unlikely(in_nmi()))
                rctx = 3;
        else if (in_irq())
                rctx = 2;
        else if (in_softirq())
                rctx = 1;
        else
                rctx = 0;

        if (recursion[rctx])
                return -1;

        recursion[rctx]++;
        barrier();

        return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
        barrier();
        recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
        return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
        return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */