Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / arch / arm64 / include / asm / arch_timer.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * arch/arm64/include/asm/arch_timer.h
4  *
5  * Copyright (C) 2012 ARM Ltd.
6  * Author: Marc Zyngier <marc.zyngier@arm.com>
7  */
8 #ifndef __ASM_ARCH_TIMER_H
9 #define __ASM_ARCH_TIMER_H
10
11 #include <asm/barrier.h>
12 #include <asm/hwcap.h>
13 #include <asm/sysreg.h>
14
15 #include <linux/bug.h>
16 #include <linux/init.h>
17 #include <linux/jump_label.h>
18 #include <linux/smp.h>
19 #include <linux/types.h>
20
21 #include <clocksource/arm_arch_timer.h>
22
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
/*
 * Out-of-line erratum workarounds are configured: each CPU may have a
 * per-CPU pointer (timer_unstable_counter_workaround) to a set of
 * replacement timer accessors.
 *
 * has_erratum_handler(h): true if this CPU has a replacement for
 * accessor 'h' installed.
 */
#define has_erratum_handler(h)                                          \
	({                                                              \
		const struct arch_timer_erratum_workaround *__wa;       \
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h);                                      \
	})

/*
 * erratum_handler(h): the function pointer to use for accessor 'h' on
 * this CPU — the workaround version when one is installed and provides
 * 'h', otherwise the default arch_timer_##h inline accessor below.
 * Caller must keep preemption disabled so the per-CPU lookup and the
 * call hit the same CPU (see arch_timer_reg_read_stable()).
 */
#define erratum_handler(h)                                              \
	({                                                              \
		const struct arch_timer_erratum_workaround *__wa;       \
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h) ? __wa->h : arch_timer_##h;           \
	})

#else
/* No OOL workarounds configured: always use the default accessors. */
#define has_erratum_handler(h)                     false
#define erratum_handler(h)                         (arch_timer_##h)
#endif
42
/* How an arch_timer_erratum_workaround's 'id' field is to be matched. */
enum arch_timer_erratum_match_type {
	ate_match_dt,			/* match a device-tree property/compatible */
	ate_match_local_cap_id,		/* match a local CPU capability ID */
	ate_match_acpi_oem_info,	/* match ACPI OEM table information */
};
48
49 struct clock_event_device;
50
/*
 * Description of one timer erratum workaround: how to identify affected
 * systems, plus replacement accessors for the registers/operations the
 * erratum breaks.  A NULL member means "use the default implementation"
 * (see erratum_handler() above).
 */
struct arch_timer_erratum_workaround {
	enum arch_timer_erratum_match_type match_type;	/* how 'id' is interpreted */
	const void *id;			/* matched per match_type above */
	const char *desc;		/* human-readable erratum description */
	u32 (*read_cntp_tval_el0)(void);
	u32 (*read_cntv_tval_el0)(void);
	u64 (*read_cntpct_el0)(void);
	u64 (*read_cntvct_el0)(void);
	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
};
62
/* Per-CPU pointer to the active erratum workaround (NULL if none). */
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
		timer_unstable_counter_workaround);
65
66 /* inline sysreg accessors that make erratum_handler() work */
67 static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
68 {
69         return read_sysreg(cntp_tval_el0);
70 }
71
72 static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
73 {
74         return read_sysreg(cntv_tval_el0);
75 }
76
77 static inline notrace u64 arch_timer_read_cntpct_el0(void)
78 {
79         return read_sysreg(cntpct_el0);
80 }
81
82 static inline notrace u64 arch_timer_read_cntvct_el0(void)
83 {
84         return read_sysreg(cntvct_el0);
85 }
86
/*
 * Read a timer register through the erratum-aware accessor.  Preemption
 * is disabled around the lookup+call so we cannot migrate to another CPU
 * (with a different workaround pointer) in between.
 */
#define arch_timer_reg_read_stable(reg)                                 \
	({                                                              \
		u64 _val;                                               \
									\
		preempt_disable_notrace();                              \
		_val = erratum_handler(read_ ## reg)();                 \
		preempt_enable_notrace();                               \
									\
		_val;                                                   \
	})
97
98 /*
99  * These register accessors are marked inline so the compiler can
100  * nicely work out which register we want, and chuck away the rest of
101  * the code.
102  */
/*
 * Write a CTRL/TVAL timer register for the physical or virtual timer.
 * Both 'access' and 'reg' are expected to be compile-time constants so
 * the compiler reduces this to a single sysreg write.  An unknown
 * access/reg combination silently writes nothing (only the isb() runs).
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntp_ctl_el0);
			break;
		case ARCH_TIMER_REG_TVAL:
			write_sysreg(val, cntp_tval_el0);
			break;
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntv_ctl_el0);
			break;
		case ARCH_TIMER_REG_TVAL:
			write_sysreg(val, cntv_tval_el0);
			break;
		}
	}

	/* Make sure the register write has taken effect before we return. */
	isb();
}
128
/*
 * Read a CTRL/TVAL timer register for the physical or virtual timer.
 * CTRL reads go straight to the sysreg; TVAL reads go through the
 * erratum-aware stable path.  An invalid access/reg combination is a
 * caller bug and hits BUG().
 */
static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntp_ctl_el0);
		case ARCH_TIMER_REG_TVAL:
			return arch_timer_reg_read_stable(cntp_tval_el0);
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntv_ctl_el0);
		case ARCH_TIMER_REG_TVAL:
			return arch_timer_reg_read_stable(cntv_tval_el0);
		}
	}

	BUG();
}
150
151 static inline u32 arch_timer_get_cntfrq(void)
152 {
153         return read_sysreg(cntfrq_el0);
154 }
155
156 static inline u32 arch_timer_get_cntkctl(void)
157 {
158         return read_sysreg(cntkctl_el1);
159 }
160
/* Update EL0 counter/timer access control (CNTKCTL_EL1). */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	write_sysreg(cntkctl, cntkctl_el1);
	/* Ensure the new access-control setting is in effect on return. */
	isb();
}
166
167 /*
168  * Ensure that reads of the counter are treated the same as memory reads
169  * for the purposes of ordering by subsequent memory barriers.
170  *
171  * This insanity brought to you by speculative system register reads,
172  * out-of-order memory accesses, sequence locks and Thomas Gleixner.
173  *
174  * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
175  */
/*
 * Build a zero that is data-dependent on 'val' (eor x, v, v), add it to
 * SP and load from the result: the dummy load cannot issue until the
 * counter read producing 'val' has completed, so subsequent memory
 * barriers order the counter read like a memory read.
 */
#define arch_counter_enforce_ordering(val) do {				\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)
185
186 static __always_inline u64 __arch_counter_get_cntpct_stable(void)
187 {
188         u64 cnt;
189
190         isb();
191         cnt = arch_timer_reg_read_stable(cntpct_el0);
192         arch_counter_enforce_ordering(cnt);
193         return cnt;
194 }
195
196 static __always_inline u64 __arch_counter_get_cntpct(void)
197 {
198         u64 cnt;
199
200         isb();
201         cnt = read_sysreg(cntpct_el0);
202         arch_counter_enforce_ordering(cnt);
203         return cnt;
204 }
205
206 static __always_inline u64 __arch_counter_get_cntvct_stable(void)
207 {
208         u64 cnt;
209
210         isb();
211         cnt = arch_timer_reg_read_stable(cntvct_el0);
212         arch_counter_enforce_ordering(cnt);
213         return cnt;
214 }
215
216 static __always_inline u64 __arch_counter_get_cntvct(void)
217 {
218         u64 cnt;
219
220         isb();
221         cnt = read_sysreg(cntvct_el0);
222         arch_counter_enforce_ordering(cnt);
223         return cnt;
224 }
225
226 #undef arch_counter_enforce_ordering
227
/*
 * Architecture init hook for the arch timer driver; arm64 has nothing
 * extra to set up, so this unconditionally reports success.
 */
static inline int arch_timer_arch_init(void)
{
	return 0;
}
232
/*
 * Record that the event stream is enabled: set the EVTSTRM CPU feature
 * and, for compat (32-bit) tasks, the matching ELF hwcap bit.
 */
static inline void arch_timer_set_evtstrm_feature(void)
{
	cpu_set_named_feature(EVTSTRM);
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
240
/* True if the EVTSTRM CPU feature has been set (see above). */
static inline bool arch_timer_have_evtstrm_feature(void)
{
	return cpu_have_named_feature(EVTSTRM);
}
245 #endif