/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */
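
/*
 * One way to perform that check (illustrative; the exact image path
 * depends on the build tree and kernel version):
 *
 *	readelf -r arch/x86/entry/vdso/vdso64.so
 *
 * should report that there are no relocations in the file.
 */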

#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/hpet.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <linux/math64.h>
#include <linux/time.h>

#define gtod (&VVAR(vsyscall_gtod_data))
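
/*
 * VVAR() resolves to a fixed offset inside the vvar page that the kernel
 * maps into every process; the vsyscall_gtod_data living there is the
 * kernel's timekeeping snapshot, readable (but not writable) from
 * userspace without entering the kernel.
 */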

extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);

#ifdef CONFIG_HPET_TIMER
extern u8 hpet_page
	__attribute__((visibility("hidden")));

static notrace cycle_t vread_hpet(void)
{
	return *(const volatile u32 *)(&hpet_page + HPET_COUNTER);
}
#endif
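
/*
 * hpet_page refers to the HPET MMIO mapping placed at a fixed offset in
 * the vdso/vvar layout, so the main counter can be read with a single
 * volatile load; HPET_COUNTER is the byte offset of the main counter
 * register (0xf0 in the HPET specification). The hidden visibility keeps
 * the reference PC-relative, avoiding a relocation.
 */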

#ifndef BUILD_VDSO32

#include <linux/kernel.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/pvclock.h>

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*ts) :
	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
	     "memory", "rcx", "r11");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
	     "memory", "rcx", "r11");
	return ret;
}

#ifdef CONFIG_PARAVIRT_CLOCK

static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
{
	const struct pvclock_vsyscall_time_info *pvti_base;
	int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
	int offset = cpu % (PAGE_SIZE/PVTI_SIZE);

	BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);

	pvti_base = (struct pvclock_vsyscall_time_info *)
		    __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);

	return &pvti_base[offset];
}
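
/*
 * Worked example (assuming 4096-byte pages and a 64-byte
 * pvclock_vsyscall_time_info, i.e. PVTI_SIZE == 64): each fixmap page
 * holds 64 entries, so cpu 70 maps to idx = 70/64 = 1 (the second page)
 * and offset = 70%64 = 6 (the seventh entry within that page).
 */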

static notrace cycle_t vread_pvclock(int *mode)
{
	const struct pvclock_vsyscall_time_info *pvti;
	cycle_t ret;
	u64 last;
	u32 version;
	u8 flags;
	unsigned cpu, cpu1;

	/*
	 * Note: hypervisor must guarantee that:
	 * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
	 * 2. that per-CPU pvclock time info is updated if the
	 *    underlying CPU changes.
	 * 3. that version is increased whenever underlying CPU
	 *    changes.
	 */
	do {
		cpu = __getcpu() & VGETCPU_CPU_MASK;
		/* TODO: We can put vcpu id into higher bits of pvti.version.
		 * This will save a couple of cycles by getting rid of
		 * __getcpu() calls (Gleb).
		 */

		pvti = get_pvti(cpu);

		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

		/*
		 * Test we're still on the cpu as well as the version.
		 * We could have been migrated just after the first
		 * vgetcpu but before fetching the version, so we
		 * wouldn't notice a version change.
		 */
		cpu1 = __getcpu() & VGETCPU_CPU_MASK;
	} while (unlikely(cpu != cpu1 ||
			  (pvti->pvti.version & 1) ||
			  pvti->pvti.version != version));
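
	/*
	 * The loop above is effectively an open-coded seqcount read side:
	 * an odd version means the hypervisor is mid-update, and a version
	 * change (or a CPU migration) means the snapshot may be torn, so
	 * we retry until a consistent <cpu, version> pair is observed.
	 */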

	if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
		*mode = VCLOCK_NONE;

	/* refer to tsc.c read_tsc() comment for rationale */
	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	return last;
}
#endif

#else

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*ts)
		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[tv], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*tv), "=m" (*tz)
		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}

#ifdef CONFIG_PARAVIRT_CLOCK

static notrace cycle_t vread_pvclock(int *mode)
{
	*mode = VCLOCK_NONE;
	return 0;
}
#endif

#endif

notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)__native_read_tsc();

	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	cycles_t cycles;

	if (gtod->vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_HPET_TIMER
	else if (gtod->vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
	else
		return 0;
	v = (cycles - gtod->cycle_last) & gtod->mask;
	return v * gtod->mult;
}
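
/*
 * Together with the ns >>= gtod->shift in the callers, this implements
 * the standard clocksource conversion
 *
 *	ns = ((cycles - cycle_last) & mask) * mult >> shift
 *
 * e.g. (illustrative numbers only) a 1 GHz counter with mult = 2^24 and
 * shift = 24 turns 1000 cycles into (1000 * 16777216) >> 24 = 1000 ns.
 */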

/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}
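
/*
 * gtod_read_begin/gtod_read_retry form the userspace half of a seqcount:
 * the kernel bumps gtod->seq to an odd value before updating the struct
 * and to an even value afterwards, so a reader that sees an odd value or
 * a changed sequence simply loops and re-reads the snapshot.
 */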

notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

notrace static void do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->wall_time_coarse_sec;
		ts->tv_nsec = gtod->wall_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace static void do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->monotonic_time_coarse_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (do_realtime(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(ts);
		break;
	default:
		goto fallback;
	}

	return 0;
fallback:
	return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
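
/*
 * Illustrative userspace view (not part of this file): depending on the
 * libc, a plain call may be resolved straight to the vDSO symbol above,
 * so the common clocks never enter the kernel:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);	// stays in userspace
 */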

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		/* do_realtime() left nanoseconds in tv_usec's slot; scale. */
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = gtod->tz_minuteswest;
		tz->tz_dsttime = gtod->tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
	time_t result = ACCESS_ONCE(gtod->wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));