/*
 * NTP client/server, based on OpenNTPD 3.9p1
 * Author: Adam Tkac <vonsch@gmail.com>
 * Licensed under GPLv2, see file LICENSE in this source tree.
 * Parts of OpenNTPD clock synchronization code are replaced by
 * code which is based on ntp-4.2.6, which carries the following
 ***********************************************************************
 *   Copyright (c) University of Delaware 1992-2009                    *
 *   Permission to use, copy, modify, and distribute this software and *
 *   its documentation for any purpose with or without fee is hereby   *
 *   granted, provided that the above copyright notice appears in all  *
 *   copies and that both the copyright notice and this permission     *
 *   notice appear in supporting documentation, and that the name      *
 *   University of Delaware not be used in advertising or publicity    *
 *   pertaining to distribution of the software without specific,      *
 *   written prior permission. The University of Delaware makes no     *
 *   representations about the suitability of this software for any    *
 *   purpose. It is provided "as is" without express or implied        *
 ***********************************************************************
//usage:#define ntpd_trivial_usage
//usage:	"[-dnqNw"IF_FEATURE_NTPD_SERVER("l")"] [-S PROG] [-p PEER]..."
//usage:#define ntpd_full_usage "\n\n"
//usage:	"NTP client/server\n"
//usage:	"\n	-d	Verbose"
//usage:	"\n	-n	Do not daemonize"
//usage:	"\n	-q	Quit after clock is set"
//usage:	"\n	-N	Run at high priority"
//usage:	"\n	-w	Do not set time (only query peers), implies -n"
//usage:	IF_FEATURE_NTPD_SERVER(
//usage:	"\n	-l	Run as server on port 123"
//usage:	)
//usage:	"\n	-S PROG	Run PROG after stepping time, stratum change, and every 11 mins"
//usage:	"\n	-p PEER	Obtain time from PEER (may be repeated)"
#include <netinet/ip.h> /* For IPTOS_LOWDELAY definition */
#include <sys/timex.h>
#ifndef IPTOS_LOWDELAY
# define IPTOS_LOWDELAY 0x10
#endif
#ifndef IP_PKTINFO
# error "Sorry, your kernel has to support IP_PKTINFO"
/* Verbosity control (max level of -dddd options accepted).
 * max 5 is very talkative (and bloated). 2 is non-bloated,
 * production level setting.
 */
/* High-level description of the algorithm:
 *
 * We start running with a very small poll_exp, BURSTPOLL,
 * in order to quickly accumulate INITIAL_SAMPLES datapoints
 * for each peer. Then, time is stepped if the offset is larger
 * than STEP_THRESHOLD, otherwise it isn't; either way, we enlarge
 * poll_exp to MINPOLL and enter the frequency measurement step:
 * we collect new datapoints but ignore them for WATCH_THRESHOLD
 * seconds. After WATCH_THRESHOLD seconds we look at the accumulated
 * offset and estimate frequency drift.
 *
 * (The frequency measurement step seems to not be strictly needed;
 * it is conditionally disabled with USING_INITIAL_FREQ_ESTIMATION
 * set to 0.)
 *
 * After this, we enter "steady state": we collect a datapoint
 * and select the best peer; if this datapoint is not a new one
 * (IOW: if this datapoint isn't for the selected peer), we sleep
 * and collect another one; otherwise, we use its offset to update
 * the frequency drift; if the offset is somewhat large, we reduce
 * poll_exp, otherwise we increase it.
 *
 * If the offset is larger than STEP_THRESHOLD, which shouldn't normally
 * happen, we assume that something "bad" happened (the computer
 * was hibernated, someone set a totally wrong date, etc),
 * then the time is stepped, all datapoints are discarded,
 * and we go back to steady state.
 */
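
/* Rough timeline sketch (derived from the constants below): poll_exp
 * starts at BURSTPOLL (2^0 = 1 sec between queries), jumps to MINPOLL
 * (2^5 = 32 sec) once INITIAL_SAMPLES datapoints are in, and then
 * wanders between MINPOLL and MAXPOLL (2^12 ~= 1.1 hours) depending on
 * how the measured offset compares to the discipline jitter.
 */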
#define RETRY_INTERVAL     5	/* on error, retry in N secs */
#define RESPONSE_INTERVAL 15	/* wait for reply up to N secs */
#define INITIAL_SAMPLES    4	/* how many samples do we want for init */
/* Clock discipline parameters and constants */

/* Step threshold (sec). std ntpd uses 0.128.
 * Using an exact power of 2 (1/8) results in smaller code */
#define STEP_THRESHOLD  0.125
#define WATCH_THRESHOLD 128	/* stepout threshold (sec). std ntpd uses 900 (11 mins (!)) */
/* NB: set WATCH_THRESHOLD to ~60 when debugging to save time */
//UNUSED: #define PANIC_THRESHOLD 1000	/* panic threshold (sec) */

#define FREQ_TOLERANCE  0.000015	/* frequency tolerance (15 PPM) */
#define BURSTPOLL       0	/* initial poll */
#define MINPOLL         5	/* minimum poll interval. std ntpd uses 6 (6: 64 sec) */
/* If offset > discipline_jitter * POLLADJ_GATE, and poll interval is >= 2^BIGPOLL,
 * then it is decreased _at once_. (If < 2^BIGPOLL, it will be decreased _eventually_).
 */
#define BIGPOLL         10	/* 2^10 sec ~= 17 min */
#define MAXPOLL         12	/* maximum poll interval (12: 1.1h, 17: 36.4h). std ntpd uses 17 */
/* Actively lower poll when we see such big offsets.
 * With STEP_THRESHOLD = 0.125, it means we try to sync more aggressively
 * if offset increases over ~0.04 sec */
#define POLLDOWN_OFFSET (STEP_THRESHOLD / 3)
#define MINDISP         0.01	/* minimum dispersion (sec) */
#define MAXDISP         16	/* maximum dispersion (sec) */
#define MAXSTRAT        16	/* maximum stratum (infinity metric) */
#define MAXDIST         1	/* distance threshold (sec) */
#define MIN_SELECTED    1	/* minimum intersection survivors */
#define MIN_CLUSTERED   3	/* minimum cluster survivors */

#define MAXDRIFT        0.000500	/* frequency drift we can correct (500 PPM) */
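
/* Sanity check on MAXDRIFT: 500 PPM is 0.0005 s/s, so we can discipline
 * a clock that gains or loses up to 0.0005 * 86400 ~= 43 seconds per day.
 */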
/* Poll-adjust threshold.
 * When we see that offset is small enough compared to discipline jitter,
 * we grow a counter: += MINPOLL. When the counter goes over POLLADJ_LIMIT,
 * we poll_exp++. If the offset isn't small, the counter -= poll_exp*2,
 * and when it goes below -POLLADJ_LIMIT, we poll_exp--.
 * (Bumped from 30 to 40 since otherwise I often see poll_exp going *2* steps down)
 */
#define POLLADJ_LIMIT   40
/* If offset < discipline_jitter * POLLADJ_GATE, then we decide to increase
 * poll interval (we think we can't improve timekeeping
 * by staying at a smaller poll).
 */
#define POLLADJ_GATE    4
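/* Worked example (illustrative numbers): with MINPOLL = 5, about eight
 * consecutive updates whose offset stays below discipline_jitter *
 * POLLADJ_GATE add 8 * 5 = 40 to the counter, reaching POLLADJ_LIMIT,
 * after which poll_exp is bumped by one. A single "bad" update at
 * poll_exp = 10 subtracts 20, so backing off happens considerably
 * faster than ramping up.
 */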
#define TIMECONST_HACK_GATE 2
/* Compromise Allan intercept (sec). doc uses 1500, std ntpd uses 512 */
/* FLL loop gain [why it depends on MAXPOLL??] */
#define FLL (MAXPOLL + 1)
/* Parameter averaging constant */
	NTP_MSGSIZE_NOAUTH = 48,
	NTP_MSGSIZE        = (NTP_MSGSIZE_NOAUTH + 4 + NTP_DIGESTSIZE),

	MODE_MASK    = (7 << 0),
	VERSION_MASK = (7 << 3),

	/* Leap Second Codes (high order two bits of m_status) */
	LI_NOWARNING = (0 << 6),	/* no warning */
	LI_PLUSSEC   = (1 << 6),	/* add a second (61 seconds) */
	LI_MINUSSEC  = (2 << 6),	/* minus a second (59 seconds) */
	LI_ALARM     = (3 << 6),	/* alarm condition */

	MODE_RES0      = 0,	/* reserved */
	MODE_SYM_ACT   = 1,	/* symmetric active */
	MODE_SYM_PAS   = 2,	/* symmetric passive */
	MODE_CLIENT    = 3,	/* client */
	MODE_SERVER    = 4,	/* server */
	MODE_BROADCAST = 5,	/* broadcast */
	MODE_RES1      = 6,	/* reserved for NTP control message */
	MODE_RES2      = 7,	/* reserved for private use */

//TODO: better base selection
#define OFFSET_1900_1970 2208988800UL	/* 1970 - 1900 in seconds */
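/* Worked example: Unix time 0 (1970-01-01 00:00:00 UTC) is NTP time
 * 2208988800.0 (0x83AA7E80 seconds), so gettime1900d() below is simply
 * "Unix seconds + OFFSET_1900_1970" (NTP era rollover is ignored).
 */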
#define NUM_DATAPOINTS  8

	uint8_t m_status;	/* status of local clock and leap info */
	uint8_t m_ppoll;	/* poll value */
	int8_t  m_precision_exp;
	s_fixedpt_t m_rootdelay;
	s_fixedpt_t m_rootdisp;
	l_fixedpt_t m_reftime;
	l_fixedpt_t m_orgtime;
	l_fixedpt_t m_rectime;
	l_fixedpt_t m_xmttime;
	uint8_t m_digest[NTP_DIGESTSIZE];
	len_and_sockaddr *p_lsa;

	/* when to send new query (if p_fd == -1)
	 * or when receive times out (if p_fd >= 0): */
	uint32_t lastpkt_refid;
	uint8_t  lastpkt_status;
	uint8_t  lastpkt_stratum;
	uint8_t  reachable_bits;
	double   next_action_time;
	double   lastpkt_recv_time;
	double   lastpkt_delay;
	double   lastpkt_rootdelay;
	double   lastpkt_rootdisp;
	/* produced by filter algorithm: */
	double   filter_offset;
	double   filter_dispersion;
	double   filter_jitter;
	datapoint_t filter_datapoint[NUM_DATAPOINTS];
	/* last sent packet: */
#define USING_KERNEL_PLL_LOOP          1
#define USING_INITIAL_FREQ_ESTIMATION  0

	/* Insert new options above this line. */
	/* Non-compat options: */
	OPT_l = (1 << 7) * ENABLE_FEATURE_NTPD_SERVER,
	/* We hijack some bits for other purposes */

	/* total round trip delay to currently selected reference clock */
	/* reference timestamp: time when the system clock was last set or corrected */
	/* total dispersion to currently selected reference clock */
	double last_script_run;
#if ENABLE_FEATURE_NTPD_SERVER

	/* refid: 32-bit code identifying the particular server or reference clock.
	 * In stratum 0 packets this is a four-character ASCII string,
	 * called the kiss code, used for debugging and monitoring.
	 * In stratum 1 packets this is a four-character ASCII string
	 * assigned to the reference clock by IANA. Example: "GPS ".
	 * In stratum 2+ packets, it's the IPv4 address, or the first 4 bytes
	 * of the MD5 hash of the IPv6 address.
	 */
	/* precision is defined as the larger of the resolution and time to
	 * read the clock, in log2 units. For instance, the precision of a
	 * mains-frequency clock incrementing at 60 Hz is 16 ms, even when the
	 * system clock hardware representation is to the nanosecond.
	 *
	 * Delays, jitters of various kinds are clamped down to precision.
	 *
	 * If precision_sec is too large, discipline_jitter gets clamped to it
	 * and if offset is smaller than discipline_jitter * POLLADJ_GATE, poll
	 * interval grows even though we really can benefit from staying at a
	 * smaller one, collecting non-lagged datapoints and correcting offset.
	 * (Lagged datapoints exist when poll_exp is large but we still have
	 * systematic offset error - the time distance between datapoints
	 * is significant and older datapoints have smaller offsets.
	 * This makes our offset estimation a bit smaller than reality)
	 * Due to this effect, setting G_precision_sec close to
	 * STEP_THRESHOLD isn't such a good idea - offsets may grow
	 * too big and we will step. I observed it with -6.
	 *
	 * OTOH, setting precision_sec far too small would result in futile
	 * attempts to synchronize to an unachievable precision.
	 *
	 * -6 is 1/64 sec, -7 is 1/128 sec and so on.
	 * -8 is 1/256 ~= 0.003906 (worked well for me --vda)
	 * -9 is 1/512 ~= 0.001953 (let's try this for some time)
	 */
#define G_precision_exp  -9
	/*
	 * G_precision_exp is used only for constructing outgoing packets.
	 * It's ok to set G_precision_sec to a slightly different value
	 * (one which is "nicer looking" in logs).
	 * Exact value would be (1.0 / (1 << (- G_precision_exp))):
	 */
#define G_precision_sec  0.002
	/* Bool. After set to 1, never goes back to 0: */
	smallint initial_poll_complete;

#define STATE_NSET      0	/* initial state, "nothing is set" */
//#define STATE_FSET    1	/* frequency set from file */
#define STATE_SPIK      2	/* spike detected */
//#define STATE_FREQ    3	/* initial frequency */
#define STATE_SYNC      4	/* clock synchronized (normal operation) */
	uint8_t  discipline_state;	// doc calls it c.state
	uint8_t  poll_exp;		// s.poll
	int      polladj_count;		// c.count
	long     kernel_freq_drift;
	peer_t   *last_update_peer;
	double   last_update_offset;	// c.last
	double   last_update_recv_time;	// s.t
	double   discipline_jitter;	// c.jitter
	/* Since we only compare it with ints, can simplify code
	 * by not making this variable floating point:
	 */
	unsigned offset_to_jitter_ratio;
	//double   cluster_offset;	// s.offset
	//double   cluster_jitter;	// s.jitter
#if !USING_KERNEL_PLL_LOOP
	double   discipline_freq_drift;	// c.freq
	/* Maybe conditionally calculate wander? it's used only for logging */
	double   discipline_wander;	// c.wander
#define G (*ptr_to_globals)
static const int const_IPTOS_LOWDELAY = IPTOS_LOWDELAY;

#define VERB1 if (MAX_VERBOSE && G.verbose)
#define VERB2 if (MAX_VERBOSE >= 2 && G.verbose >= 2)
#define VERB3 if (MAX_VERBOSE >= 3 && G.verbose >= 3)
#define VERB4 if (MAX_VERBOSE >= 4 && G.verbose >= 4)
#define VERB5 if (MAX_VERBOSE >= 5 && G.verbose >= 5)
static double LOG2D(int a)
{
	if (a < 0)
		return 1.0 / (1UL << -a);

static ALWAYS_INLINE double SQUARE(double x)

static ALWAYS_INLINE double MAXD(double a, double b)

static ALWAYS_INLINE double MIND(double a, double b)

static NOINLINE double my_SQRT(double X)
{
	union {
		float   f;
		int32_t i;
	} v;
	double invsqrt;
	double Xhalf = X * 0.5;

	/* Fast and good approximation to 1/sqrt(X), black magic */
	v.f = X;
	/*v.i = 0x5f3759df - (v.i >> 1);*/
	v.i = 0x5f375a86 - (v.i >> 1); /* - this constant is slightly better */
	invsqrt = v.f; /* better than 0.2% accuracy */

	/* Refining it using Newton's method: x1 = x0 - f(x0)/f'(x0)
	 * f(x) = 1/(x*x) - X  (f == 0 when x = 1/sqrt(X))
	 * f'(x) = -2/(x*x*x)
	 * f(x)/f'(x) = (X - 1/(x*x)) / (2/(x*x*x)) = X*x*x*x/2 - x/2
	 * x1 = x0 - (X*x0*x0*x0/2 - x0/2) = 1.5*x0 - X*x0*x0*x0/2 = x0*(1.5 - (X/2)*x0*x0)
	 */
	invsqrt = invsqrt * (1.5 - Xhalf * invsqrt * invsqrt); /* ~0.05% accuracy */
	/* invsqrt = invsqrt * (1.5 - Xhalf * invsqrt * invsqrt); 2nd iter: ~0.0001% accuracy */
	/* With 4 iterations, more than half of the results will be exact;
	 * at the 6th iteration the result stabilizes, with about 72% of results exact.
	 * We are well satisfied with 0.05% accuracy.
	 */
	return X * invsqrt; /* X * 1/sqrt(X) ~= sqrt(X) */
}

static ALWAYS_INLINE double SQRT(double X)
{
	/* If this arch doesn't use IEEE 754 floats, fall back to using libm */
	if (sizeof(float) != 4)
		return sqrt(X);

	/* This avoids needing libm, saves about 0.5k on x86-32 */
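
/* A minimal self-test sketch (hypothetical, not part of ntpd): flip the
 * #if 0 and link against libm to compare my_SQRT() with libm's sqrt():
 */
#if 0
#include <math.h>
static void my_SQRT_selftest(void)
{
	double x;
	for (x = 0.001; x < 1000.0; x *= 1.7) {
		double approx = my_SQRT(x);
		double exact = sqrt(x);
		/* with one Newton iteration, relative error stays around 0.05% */
		bb_error_msg("X:%f my_SQRT:%f sqrt:%f relerr:%e",
			x, approx, exact, (approx - exact) / exact);
	}
}
#endif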
	gettimeofday(&tv, NULL); /* never fails */
	G.cur_time = tv.tv_sec + (1.0e-6 * tv.tv_usec) + OFFSET_1900_1970;
	return G.cur_time;
}

static void
d_to_tv(double d, struct timeval *tv)
{
	tv->tv_sec = (long)d;
	tv->tv_usec = (d - tv->tv_sec) * 1000000;
}

static double
lfp_to_d(l_fixedpt_t lfp)
{
	double ret;
	lfp.int_partl = ntohl(lfp.int_partl);
	lfp.fractionl = ntohl(lfp.fractionl);
	ret = (double)lfp.int_partl + ((double)lfp.fractionl / UINT_MAX);
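	/* Worked example (illustrative): int_partl = 0x83AA7E80 is decimal
	 * 2208988800 (1970-01-01), and fractionl = 0x80000000 gives
	 * 2147483648 / 4294967295 ~= 0.5, so ret ~= 2208988800.5.
	 */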
static double
sfp_to_d(s_fixedpt_t sfp)
{
	double ret;
	sfp.int_parts = ntohs(sfp.int_parts);
	sfp.fractions = ntohs(sfp.fractions);
	ret = (double)sfp.int_parts + ((double)sfp.fractions / USHRT_MAX);
	return ret;
}

#if ENABLE_FEATURE_NTPD_SERVER
static l_fixedpt_t
d_to_lfp(double d)
{
	l_fixedpt_t lfp;
	lfp.int_partl = (uint32_t)d;
	lfp.fractionl = (uint32_t)((d - lfp.int_partl) * UINT_MAX);
	lfp.int_partl = htonl(lfp.int_partl);
	lfp.fractionl = htonl(lfp.fractionl);
	return lfp;
}
static s_fixedpt_t
d_to_sfp(double d)
{
	s_fixedpt_t sfp;
	sfp.int_parts = (uint16_t)d;
	sfp.fractions = (uint16_t)((d - sfp.int_parts) * USHRT_MAX);
	sfp.int_parts = htons(sfp.int_parts);
	sfp.fractions = htons(sfp.fractions);
	return sfp;
}
#endif
static double
dispersion(const datapoint_t *dp)
{
	return dp->d_dispersion + FREQ_TOLERANCE * (G.cur_time - dp->d_recv_time);
}

static double
root_distance(peer_t *p)
{
	/* The root synchronization distance is the maximum error due to
	 * all causes of the local clock relative to the primary server.
	 * It is defined as half the total delay plus total dispersion
	 * plus peer jitter.
	 */
	return MAXD(MINDISP, p->lastpkt_rootdelay + p->lastpkt_delay) / 2
		+ p->lastpkt_rootdisp
		+ p->filter_dispersion
		+ FREQ_TOLERANCE * (G.cur_time - p->lastpkt_recv_time)
		+ p->filter_jitter;
}

static void
set_next(peer_t *p, unsigned t)
{
	p->next_action_time = G.cur_time + t;
}
/*
 * Peer clock filter and its helpers
 */
static NOINLINE void
filter_datapoints(peer_t *p)
{
	int i, idx;
	int got_newest;
	double minoff, maxoff, wavg, sum, w;
	double x = x;			/* for compiler */
	double oldest_off = oldest_off;
	double oldest_age = oldest_age;
	double newest_off = newest_off;
	double newest_age = newest_age;
	minoff = maxoff = p->filter_datapoint[0].d_offset;
	for (i = 1; i < NUM_DATAPOINTS; i++) {
		if (minoff > p->filter_datapoint[i].d_offset)
			minoff = p->filter_datapoint[i].d_offset;
		if (maxoff < p->filter_datapoint[i].d_offset)
			maxoff = p->filter_datapoint[i].d_offset;
	}

	idx = p->datapoint_idx; /* most recent datapoint */
	/* Average offset:
	 * Drop two outliers and take weighted average of the rest:
	 * most_recent/2 + older1/4 + older2/8 ... + older5/32 + older6/32
	 * we use older6/32, not older6/64 since sum of weights should be 1:
	 * 1/2 + 1/4 + 1/8 + 1/16 + 1/32 + 1/32 = 1
	 */
	/* Dispersion is a weighted sum of individual dispersions
	 * (the most recent datapoint gets the largest weight, 1/2):
	 *
	 *   filter_dispersion = SUM( dispersion_i / 2^(i+1) ), i = 0..n-1
	 */
	wavg = 0;
	w = 0.5;
	sum = 0;
	got_newest = 0;
	for (i = 0; i < NUM_DATAPOINTS; i++) {
		VERB4 bb_error_msg("datapoint[%d]: off:%f disp:%f(%f) age:%f%s",
			i,
			p->filter_datapoint[idx].d_offset,
			p->filter_datapoint[idx].d_dispersion, dispersion(&p->filter_datapoint[idx]),
			G.cur_time - p->filter_datapoint[idx].d_recv_time,
			(minoff == p->filter_datapoint[idx].d_offset || maxoff == p->filter_datapoint[idx].d_offset)
				? " (outlier by offset)" : ""
		);

		sum += dispersion(&p->filter_datapoint[idx]) / (2 << i);

		if (minoff == p->filter_datapoint[idx].d_offset) {
			minoff -= 1; /* so that we don't match it ever again */
		} else
		if (maxoff == p->filter_datapoint[idx].d_offset) {
			maxoff += 1;
		} else {
			oldest_off = p->filter_datapoint[idx].d_offset;
			oldest_age = G.cur_time - p->filter_datapoint[idx].d_recv_time;
			if (!got_newest) {
				got_newest = 1;
				newest_off = oldest_off;
				newest_age = oldest_age;
			}
			x = oldest_off * w;
			wavg += x;
			w /= 2;
		}

		idx = (idx - 1) & (NUM_DATAPOINTS - 1);
	}
	p->filter_dispersion = sum;
	wavg += x; /* add another older6/64 to form older6/32 */
	/* Fix systematic underestimation with large poll intervals.
	 * Imagine that we still have a bit of uncorrected drift,
	 * and poll interval is big (say, 100 sec). Offsets form a progression:
	 * 0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 - 0.7 is most recent.
	 * The algorithm above drops 0.0 and 0.7 as outliers,
	 * and then we have this estimation, ~25% off from 0.7:
	 * 0.1/32 + 0.2/32 + 0.3/16 + 0.4/8 + 0.5/4 + 0.6/2 = 0.503125
	 */
	x = oldest_age - newest_age;
	if (x != 0) {
		x = newest_age / x; /* in above example, 100 / (600 - 100) */
		if (x < 1) { /* paranoia check */
			x = (newest_off - oldest_off) * x; /* 0.5 * 100/500 = 0.1 */
			wavg += x;
		}
	}
	p->filter_offset = wavg;

	/* Jitter is the RMS of the datapoint offsets' deviation
	 * from the average:
	 *
	 *   filter_jitter = sqrt( SUM( (avg - offset_j)^2 ) / n )
	 *
	 * where n is the number of valid datapoints in the filter (n > 1);
	 * if filter_jitter < precision then filter_jitter = precision
	 */
	sum = 0;
	for (i = 0; i < NUM_DATAPOINTS; i++) {
		sum += SQUARE(wavg - p->filter_datapoint[i].d_offset);
	}
	sum = SQRT(sum / NUM_DATAPOINTS);
	p->filter_jitter = sum > G_precision_sec ? sum : G_precision_sec;

	VERB3 bb_error_msg("filter offset:%+f(corr:%e) disp:%f jitter:%f",
		p->filter_offset, x,
		p->filter_dispersion,
		p->filter_jitter);
static void
reset_peer_stats(peer_t *p, double offset)
{
	int i;
	bool small_ofs = fabs(offset) < 16 * STEP_THRESHOLD;

	for (i = 0; i < NUM_DATAPOINTS; i++) {
		if (small_ofs) {
			p->filter_datapoint[i].d_recv_time += offset;
			if (p->filter_datapoint[i].d_offset != 0) {
				p->filter_datapoint[i].d_offset -= offset;
				//bb_error_msg("p->filter_datapoint[%d].d_offset %f -> %f",
				//	i,
				//	p->filter_datapoint[i].d_offset + offset,
				//	p->filter_datapoint[i].d_offset);
			}
		} else {
			p->filter_datapoint[i].d_recv_time  = G.cur_time;
			p->filter_datapoint[i].d_offset     = 0;
			p->filter_datapoint[i].d_dispersion = MAXDISP;
		}
	}
	if (small_ofs) {
		p->lastpkt_recv_time += offset;
	} else {
		p->reachable_bits = 0;
		p->lastpkt_recv_time = G.cur_time;
	}
	filter_datapoints(p); /* recalc p->filter_xxx */
	VERB5 bb_error_msg("%s->lastpkt_recv_time=%f", p->p_dotted, p->lastpkt_recv_time);
	p = xzalloc(sizeof(*p));
	p->p_lsa = xhost2sockaddr(s, 123);
	p->p_dotted = xmalloc_sockaddr2dotted_noport(&p->p_lsa->u.sa);
	p->p_fd = -1;
	p->p_xmt_msg.m_status = MODE_CLIENT | (NTP_VERSION << 3);
	p->next_action_time = G.cur_time; /* = set_next(p, 0); */
	reset_peer_stats(p, 16 * STEP_THRESHOLD);

	llist_add_to(&G.ntp_peers, p);
		const struct sockaddr *from, const struct sockaddr *to, socklen_t addrlen,
		msg_t *msg, ssize_t len)
{
	ssize_t ret;

	errno = 0;
	if (!from) {
		ret = sendto(fd, msg, len, MSG_DONTWAIT, to, addrlen);
	} else {
		ret = send_to_from(fd, msg, len, MSG_DONTWAIT, to, from, addrlen);
	}
	if (ret != len) {
		bb_perror_msg("send failed");
static void
send_query_to_peer(peer_t *p)
{
	/* Why do we need to bind()?
	 * See what happens when we don't bind:
	 *
	 * socket(PF_INET, SOCK_DGRAM, IPPROTO_IP) = 3
	 * setsockopt(3, SOL_IP, IP_TOS, [16], 4) = 0
	 * gettimeofday({1259071266, 327885}, NULL) = 0
	 * sendto(3, "xxx", 48, MSG_DONTWAIT, {sa_family=AF_INET, sin_port=htons(123), sin_addr=inet_addr("10.34.32.125")}, 16) = 48
	 * ^^^ we sent it from some source port picked by kernel.
	 * time(NULL) = 1259071266
	 * write(2, "ntpd: entering poll 15 secs\n", 28) = 28
	 * poll([{fd=3, events=POLLIN}], 1, 15000) = 1 ([{fd=3, revents=POLLIN}])
	 * recv(3, "yyy", 68, MSG_DONTWAIT) = 48
	 * ^^^ this recv will receive packets sent to any local port!
	 *
	 * Uncomment this and use strace to see it in action:
	 */
#define PROBE_LOCAL_ADDR /* { len_and_sockaddr lsa; lsa.len = LSA_SIZEOF_SA; getsockname(p->query.fd, &lsa.u.sa, &lsa.len); } */

	if (p->p_fd == -1) {
		int fd, family;
		len_and_sockaddr *local_lsa;

		family = p->p_lsa->u.sa.sa_family;
		p->p_fd = fd = xsocket_type(&local_lsa, family, SOCK_DGRAM);
		/* local_lsa has "null" address and port 0 now.
		 * bind() ensures we have a *particular port* selected by kernel
		 * and remembered in p->p_fd, thus later recv(p->p_fd)
		 * receives only packets sent to this port.
		 */
		xbind(fd, &local_lsa->u.sa, local_lsa->len);
#if ENABLE_FEATURE_IPV6
		if (family == AF_INET)
#endif
			setsockopt(fd, IPPROTO_IP, IP_TOS, &const_IPTOS_LOWDELAY, sizeof(const_IPTOS_LOWDELAY));
		free(local_lsa);
	}

	/* Emit message _before_ attempted send. Think of very short
	 * roundtrip networks: we need to go back to the recv loop ASAP,
	 * to reduce delay. Printing messages after send works against that.
	 */
	VERB1 bb_error_msg("sending query to %s", p->p_dotted);

	/*
	 * Send out a random 64-bit number as our transmit time. The NTP
	 * server will copy said number into the originate field on the
	 * response that it sends us. This is totally legal per the SNTP spec.
	 *
	 * The impact of this is twofold: we no longer send out the current
	 * system time for the world to see (which may aid an attacker), and
	 * it gives us a (not very secure) way of knowing that we're not
	 * getting spoofed by an attacker that can't capture our traffic
	 * but can spoof packets from the NTP server we're communicating with.
	 *
	 * Save the real transmit timestamp locally.
	 */
	p->p_xmt_msg.m_xmttime.int_partl = random();
	p->p_xmt_msg.m_xmttime.fractionl = random();
	p->p_xmttime = gettime1900d();

	if (do_sendto(p->p_fd, /*from:*/ NULL, /*to:*/ &p->p_lsa->u.sa, /*addrlen:*/ p->p_lsa->len,
			&p->p_xmt_msg, NTP_MSGSIZE_NOAUTH) == -1
	) {
		close(p->p_fd);
		p->p_fd = -1;
		set_next(p, RETRY_INTERVAL);
		return;
	}

	p->reachable_bits <<= 1;
	set_next(p, RESPONSE_INTERVAL);
/* Note that there is no provision to prevent several run_script()
 * invocations from happening in quick succession. In fact, this happens
 * rather often if the initial synchronization results in a step.
 * You will see "step" and then "stratum" script runs, sometimes
 * as close as only 0.002 seconds apart.
 * Script should be ready to deal with this.
 */
static void run_script(const char *action, double offset)
{
	char *argv[3];
	char *env1, *env2, *env3, *env4;

	if (!G.script_name)
		return;

	argv[0] = (char*) G.script_name;
	argv[1] = (char*) action;
	argv[2] = NULL;

	VERB1 bb_error_msg("executing '%s %s'", G.script_name, action);

	env1 = xasprintf("%s=%u", "stratum", G.stratum);
	putenv(env1);
	env2 = xasprintf("%s=%ld", "freq_drift_ppm", G.kernel_freq_drift);
	putenv(env2);
	env3 = xasprintf("%s=%u", "poll_interval", 1 << G.poll_exp);
	putenv(env3);
	env4 = xasprintf("%s=%f", "offset", offset);
	putenv(env4);
	/* Other items of potential interest: selected peer,
	 * rootdelay, reftime, rootdisp, refid, ntp_status,
	 * last_update_offset, last_update_recv_time, discipline_jitter,
	 * how many peers have reachable_bits = 0?
	 */

	/* Don't want to wait: it may run hwclock --systohc, and that
	 * may take some time (seconds): */
	/*spawn_and_wait(argv);*/
	spawn(argv);

	unsetenv("stratum");
	unsetenv("freq_drift_ppm");
	unsetenv("poll_interval");
	unsetenv("offset");
	free(env1);
	free(env2);
	free(env3);
	free(env4);

	G.last_script_run = G.cur_time;
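	/* Example (illustrative values): with -S /etc/ntpd.script, right
	 * after a step the script is invoked as "/etc/ntpd.script step"
	 * with an environment like:
	 *   stratum=16 freq_drift_ppm=0 poll_interval=32 offset=-0.180735
	 */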
static void
step_time(double offset)
{
	llist_t *item;
	double dtime;
	struct timeval tvc, tvn;
	char buf[sizeof("yyyy-mm-dd hh:mm:ss") + /*paranoia:*/ 4];
	time_t tval;

	gettimeofday(&tvc, NULL); /* never fails */
	dtime = tvc.tv_sec + (1.0e-6 * tvc.tv_usec) + offset;
	d_to_tv(dtime, &tvn);
	if (settimeofday(&tvn, NULL) == -1)
		bb_perror_msg_and_die("settimeofday");

	VERB2 {
		tval = tvc.tv_sec;
		strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", localtime(&tval));
		bb_error_msg("current time is %s.%06u", buf, (unsigned)tvc.tv_usec);
	}
	tval = tvn.tv_sec;
	strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", localtime(&tval));
	bb_error_msg("setting time to %s.%06u (offset %+fs)", buf, (unsigned)tvn.tv_usec, offset);

	/* Correct various fields which contain time-relative values: */

	/* p->lastpkt_recv_time, p->next_action_time and such: */
	for (item = G.ntp_peers; item != NULL; item = item->link) {
		peer_t *pp = (peer_t *) item->data;
		reset_peer_stats(pp, offset);
		//bb_error_msg("offset:%+f pp->next_action_time:%f -> %f",
		//	offset, pp->next_action_time, pp->next_action_time + offset);
		pp->next_action_time += offset;
	}
	/* Globals: */
	G.cur_time += offset;
	G.last_update_recv_time += offset;
	G.last_script_run += offset;
}
/*
 * Selection and clustering, and their helpers
 */
	double opt_rd; /* optimization */
} point_t;
static int
compare_point_edge(const void *aa, const void *bb)
{
	const point_t *a = aa;
	const point_t *b = bb;
	if (a->edge < b->edge) {
		return -1;
	}
	return (a->edge > b->edge);
}

static int
compare_survivor_metric(const void *aa, const void *bb)
{
	const survivor_t *a = aa;
	const survivor_t *b = bb;
	if (a->metric < b->metric) {
		return -1;
	}
	return (a->metric > b->metric);
}
static int
fit(peer_t *p, double rd)
{
	if ((p->reachable_bits & (p->reachable_bits-1)) == 0) {
		/* One or zero bits in reachable_bits */
		VERB3 bb_error_msg("peer %s unfit for selection: unreachable", p->p_dotted);
		return 0;
	}
#if 0	/* we filter out such packets earlier */
	if ((p->lastpkt_status & LI_ALARM) == LI_ALARM
	 || p->lastpkt_stratum >= MAXSTRAT
	) {
		VERB3 bb_error_msg("peer %s unfit for selection: bad status/stratum", p->p_dotted);
		return 0;
	}
#endif
	/* rd is root_distance(p) */
	if (rd > MAXDIST + FREQ_TOLERANCE * (1 << G.poll_exp)) {
		VERB3 bb_error_msg("peer %s unfit for selection: root distance too high", p->p_dotted);
		return 0;
	}
//	/* Do we have a loop? */
//	if (p->refid == p->dstaddr || p->refid == s.refid)
	return 1;
}
static peer_t*
select_and_cluster(void)
{
	peer_t     *p;
	llist_t    *item;
	int        i, j;
	int        size = 3 * G.peer_cnt;
	/* for selection algorithm */
	point_t    point[size];
	unsigned   num_points, num_candidates;
	double     low, high;
	unsigned   num_falsetickers;
	/* for cluster algorithm */
	survivor_t survivor[size];
	unsigned   num_survivors;

	/* Selection */

	num_points = 0;
	item = G.ntp_peers;
	if (G.initial_poll_complete) while (item != NULL) {
		double rd, offset;

		p = (peer_t *) item->data;
		rd = root_distance(p);
		offset = p->filter_offset;
		if (!fit(p, rd)) {
			item = item->link;
			continue;
		}

		VERB4 bb_error_msg("interval: [%f %f %f] %s",
			offset - rd,
			offset,
			offset + rd,
			p->p_dotted
		);
		point[num_points].p = p;
		point[num_points].type = -1;
		point[num_points].edge = offset - rd;
		point[num_points].opt_rd = rd;
		num_points++;
		point[num_points].p = p;
		point[num_points].type = 0;
		point[num_points].edge = offset;
		point[num_points].opt_rd = rd;
		num_points++;
		point[num_points].p = p;
		point[num_points].type = 1;
		point[num_points].edge = offset + rd;
		point[num_points].opt_rd = rd;
		num_points++;
		item = item->link;
	}
	num_candidates = num_points / 3;
	if (num_candidates == 0) {
		VERB3 bb_error_msg("no valid datapoints, no peer selected");
		return NULL;
	}
//TODO: sorting does not seem to be done in reference code
	qsort(point, num_points, sizeof(point[0]), compare_point_edge);
	/* Start with the assumption that there are no falsetickers.
	 * Attempt to find a nonempty intersection interval containing
	 * the midpoints of all truechimers.
	 * If a nonempty interval cannot be found, increase the number
	 * of assumed falsetickers by one and try again.
	 * If a nonempty interval is found and the number of falsetickers
	 * is less than the number of truechimers, a majority has been found
	 * and the midpoint of each truechimer represents
	 * the candidates available to the cluster algorithm.
	 */
	num_falsetickers = 0;
	while (1) {
		int c;
		unsigned num_midpoints = 0;

		low = 1 << 30;
		high = - (1 << 30);
		c = 0;
		for (i = 0; i < num_points; i++) {
			/* We want to do:
			 * if (point[i].type == -1) c++;
			 * if (point[i].type == 1) c--;
			 * and it's simpler to do it this way:
			 */
			c -= point[i].type;
			if (c >= num_candidates - num_falsetickers) {
				/* If it was c++ and it got big enough... */
				low = point[i].edge;
				break;
			}
			if (point[i].type == 0)
				num_midpoints++;
		}
		c = 0;
		for (i = num_points-1; i >= 0; i--) {
			c += point[i].type;
			if (c >= num_candidates - num_falsetickers) {
				high = point[i].edge;
				break;
			}
			if (point[i].type == 0)
				num_midpoints++;
		}
		/* If the number of midpoints is greater than the number
		 * of allowed falsetickers, the intersection contains at
		 * least one truechimer with no midpoint - bad.
		 * Also, interval should be nonempty.
		 */
		if (num_midpoints <= num_falsetickers && low < high)
			break;
		num_falsetickers++;
		if (num_falsetickers * 2 >= num_candidates) {
			VERB3 bb_error_msg("too many falsetickers:%d (candidates:%d), no peer selected",
				num_falsetickers, num_candidates);
			return NULL;
		}
	}
	VERB3 bb_error_msg("selected interval: [%f, %f]; candidates:%d falsetickers:%d",
		low, high, num_candidates, num_falsetickers);
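
	/* Worked example (illustrative): three peers with
	 * [offset-rd, offset+rd] intervals
	 *   A: [-0.02, +0.02]   B: [-0.01, +0.03]   C: [+0.10, +0.14]
	 * With num_falsetickers = 0 no point is covered by all three
	 * intervals; retrying with num_falsetickers = 1 finds
	 * low = -0.01, high = +0.02. C's midpoint (+0.12) lies outside,
	 * so C is the falseticker and A, B go on to clustering.
	 */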
	/* Construct a list of survivors (p, metric)
	 * from the chime list, where metric is dominated
	 * first by stratum and then by root distance.
	 * All other things being equal, this is the order of preference.
	 */
	num_survivors = 0;
	for (i = 0; i < num_points; i++) {
		if (point[i].edge < low || point[i].edge > high)
			continue;
		p = point[i].p;
		survivor[num_survivors].p = p;
		/* x.opt_rd == root_distance(p); */
		survivor[num_survivors].metric = MAXDIST * p->lastpkt_stratum + point[i].opt_rd;
		VERB4 bb_error_msg("survivor[%d] metric:%f peer:%s",
			num_survivors, survivor[num_survivors].metric, p->p_dotted);
		num_survivors++;
	}
	/* There must be at least MIN_SELECTED survivors to satisfy the
	 * correctness assertions. Ordinarily, the Byzantine criteria
	 * require four survivors, but for the demonstration here, one
	 * is acceptable.
	 */
	if (num_survivors < MIN_SELECTED) {
		VERB3 bb_error_msg("num_survivors %d < %d, no peer selected",
			num_survivors, MIN_SELECTED);
		return NULL;
	}

//looks like this is ONLY used by the fact that later we pick survivor[0].
//we can avoid sorting then, just find the minimum once!
	qsort(survivor, num_survivors, sizeof(survivor[0]), compare_survivor_metric);
	/* For each association p in turn, calculate the selection
	 * jitter p->sjitter as the square root of the sum of squares
	 * (p->offset - q->offset) over all q associations. The idea is
	 * to repeatedly discard the survivor with maximum selection
	 * jitter until a termination condition is met.
	 */
	while (1) {
		unsigned max_idx = max_idx;
		double max_selection_jitter = max_selection_jitter;
		double min_jitter = min_jitter;

		if (num_survivors <= MIN_CLUSTERED) {
			VERB3 bb_error_msg("num_survivors %d <= %d, not discarding more",
				num_survivors, MIN_CLUSTERED);
			break;
		}

		/* To make sure a few survivors are left
		 * for the clustering algorithm to chew on,
		 * we stop if the number of survivors
		 * is less than or equal to MIN_CLUSTERED (3).
		 */
		for (i = 0; i < num_survivors; i++) {
			double selection_jitter_sq;

			p = survivor[i].p;
			if (i == 0 || p->filter_jitter < min_jitter)
				min_jitter = p->filter_jitter;

			selection_jitter_sq = 0;
			for (j = 0; j < num_survivors; j++) {
				peer_t *q = survivor[j].p;
				selection_jitter_sq += SQUARE(p->filter_offset - q->filter_offset);
			}
			if (i == 0 || selection_jitter_sq > max_selection_jitter) {
				max_selection_jitter = selection_jitter_sq;
				max_idx = i;
			}
			VERB5 bb_error_msg("survivor %d selection_jitter^2:%f",
				i, selection_jitter_sq);
		}
		max_selection_jitter = SQRT(max_selection_jitter / num_survivors);
		VERB4 bb_error_msg("max_selection_jitter (at %d):%f min_jitter:%f",
			max_idx, max_selection_jitter, min_jitter);

		/* If the maximum selection jitter is less than the
		 * minimum peer jitter, then tossing out more survivors
		 * will not lower the minimum peer jitter, so we might
		 * as well stop.
		 */
		if (max_selection_jitter < min_jitter) {
			VERB3 bb_error_msg("max_selection_jitter:%f < min_jitter:%f, num_survivors:%d, not discarding more",
				max_selection_jitter, min_jitter, num_survivors);
			break;
		}

		/* Delete survivor[max_idx] from the list
		 * and go around again.
		 */
		VERB5 bb_error_msg("dropping survivor %d", max_idx);
		num_survivors--;
		while (max_idx < num_survivors) {
			survivor[max_idx] = survivor[max_idx + 1];
			max_idx++;
		}
	}
	/* Combine the offsets of the clustering algorithm survivors
	 * using a weighted average with weight determined by the root
	 * distance. Compute the selection jitter as the weighted RMS
	 * difference between the first survivor and the remaining
	 * survivors. In some cases the inherent clock jitter can be
	 * reduced by not using this algorithm, especially when frequent
	 * clockhopping is involved. bbox: thus we don't do it.
	 */
#if 0
	double x, y, z, w;
	y = z = w = 0;
	for (i = 0; i < num_survivors; i++) {
		p = survivor[i].p;
		x = root_distance(p);
		y += 1 / x;
		z += p->filter_offset / x;
		w += SQUARE(p->filter_offset - survivor[0].p->filter_offset) / x;
	}
	//G.cluster_offset = z / y;
	//G.cluster_jitter = SQRT(w / y);
#endif
	/* Pick the best clock. If the old system peer is on the list
	 * and at the same stratum as the first survivor on the list,
	 * then don't do a clock hop. Otherwise, select the first
	 * survivor on the list as the new system peer.
	 */
	p = survivor[0].p;
	if (G.last_update_peer
	 && G.last_update_peer->lastpkt_stratum <= p->lastpkt_stratum
	) {
		/* Starting from 1 is ok here */
		for (i = 1; i < num_survivors; i++) {
			if (G.last_update_peer == survivor[i].p) {
				VERB4 bb_error_msg("keeping old synced peer");
				p = G.last_update_peer;
				goto keep_old;
			}
		}
	}
	G.last_update_peer = p;
 keep_old:
	VERB3 bb_error_msg("selected peer %s filter_offset:%+f age:%f",
		p->p_dotted,
		p->filter_offset,
		G.cur_time - p->lastpkt_recv_time
	);
	return p;
}
/*
 * Local clock discipline and its helpers
 */
static void
set_new_values(int disc_state, double offset, double recv_time)
{
	/* Enter new state and set state variables. Note we use the time
	 * of the last clock filter sample, which must be earlier than
	 * the current time.
	 */
	VERB3 bb_error_msg("disc_state=%d last update offset=%f recv_time=%f",
		disc_state, offset, recv_time);
	G.discipline_state = disc_state;
	G.last_update_offset = offset;
	G.last_update_recv_time = recv_time;
}
/* Return: -1: decrease poll interval, 0: leave as is, 1: increase */
static NOINLINE int
update_local_clock(peer_t *p)
{
	int rc;
	struct timex tmx;
	/* Note: can use G.cluster_offset instead: */
	double offset = p->filter_offset;
	double recv_time = p->lastpkt_recv_time;
	double abs_offset;
#if !USING_KERNEL_PLL_LOOP
	double freq_drift;
#endif
	double since_last_update;
	double etemp, dtemp;

	abs_offset = fabs(offset);

#if 0
	/* If needed, -S script can do it by looking at $offset
	 * env var and killing parent */
	/* If the offset is too large, give up and go home */
	if (abs_offset > PANIC_THRESHOLD) {
		bb_error_msg_and_die("offset %f far too big, exiting", offset);
	}
#endif

	/* If this is an old update, for instance as the result
	 * of a system peer change, avoid it. We never use
	 * an old sample or the same sample twice.
	 */
	if (recv_time <= G.last_update_recv_time) {
		VERB3 bb_error_msg("same or older datapoint: %f >= %f, not using it",
			G.last_update_recv_time, recv_time);
		return 0; /* "leave poll interval as is" */
	}

	/* Clock state machine transition function. This is where the
	 * action is and defines how the system reacts to large time
	 * and frequency errors.
	 */
	since_last_update = recv_time - G.reftime;
#if !USING_KERNEL_PLL_LOOP
	freq_drift = 0;
#endif
#if USING_INITIAL_FREQ_ESTIMATION
	if (G.discipline_state == STATE_FREQ) {
		/* Ignore updates until the stepout threshold */
		if (since_last_update < WATCH_THRESHOLD) {
			VERB3 bb_error_msg("measuring drift, datapoint ignored, %f sec remains",
				WATCH_THRESHOLD - since_last_update);
			return 0; /* "leave poll interval as is" */
		}
# if !USING_KERNEL_PLL_LOOP
		freq_drift = (offset - G.last_update_offset) / since_last_update;
# endif
	}
#endif
	/* There are two main regimes: when the
	 * offset exceeds the step threshold and when it does not.
	 */
	if (abs_offset > STEP_THRESHOLD) {
		switch (G.discipline_state) {
		case STATE_SYNC:
			/* The first outlier: ignore it, switch to SPIK state */
			VERB3 bb_error_msg("offset:%+f - spike detected", offset);
			G.discipline_state = STATE_SPIK;
			return -1; /* "decrease poll interval" */

		case STATE_SPIK:
			/* Ignore succeeding outliers until either an inlier
			 * is found or the stepout threshold is exceeded.
			 */
			if (since_last_update < WATCH_THRESHOLD) {
				VERB3 bb_error_msg("spike detected, datapoint ignored, %f sec remains",
					WATCH_THRESHOLD - since_last_update);
				return -1; /* "decrease poll interval" */
			}
			/* fall through: we need to step */
		} /* switch */

		/* Step the time and clamp down the poll interval.
		 *
		 * In NSET state an initial frequency correction is
		 * not available, usually because the frequency file has
		 * not yet been written. Since the time is outside the
		 * capture range, the clock is stepped. The frequency
		 * will be set directly following the stepout interval.
		 *
		 * In FSET state the initial frequency has been set
		 * from the frequency file. Since the time is outside
		 * the capture range, the clock is stepped immediately,
		 * rather than after the stepout interval. Guys get
		 * nervous if it takes 17 minutes to set the clock for
		 * the first time.
		 *
		 * In SPIK state the stepout threshold has expired and
		 * the phase is still above the step threshold. Note
		 * that a single spike greater than the step threshold
		 * is always suppressed, even at the longer poll
		 * intervals.
		 */
		VERB3 bb_error_msg("stepping time by %+f; poll_exp=MINPOLL", offset);
		step_time(offset);
		if (option_mask32 & OPT_q) {
			/* We were only asked to set time once. Done. */
			exit(0);
		}

		G.polladj_count = 0;
		G.poll_exp = MINPOLL;
		G.stratum = MAXSTRAT;

		run_script("step", offset);

#if USING_INITIAL_FREQ_ESTIMATION
		if (G.discipline_state == STATE_NSET) {
			set_new_values(STATE_FREQ, /*offset:*/ 0, recv_time);
			return 1; /* "ok to increase poll interval" */
		}
#endif
		abs_offset = offset = 0;
		set_new_values(STATE_SYNC, offset, recv_time);
	} else { /* abs_offset <= STEP_THRESHOLD */

		if (G.poll_exp < MINPOLL && G.initial_poll_complete) {
			VERB3 bb_error_msg("small offset:%+f, disabling burst mode", offset);
			G.polladj_count = 0;
			G.poll_exp = MINPOLL;
		}

		/* Compute the clock jitter as the RMS of exponentially
		 * weighted offset differences. Used by the poll adjust code.
		 */
		etemp = SQUARE(G.discipline_jitter);
		dtemp = SQUARE(offset - G.last_update_offset);
		G.discipline_jitter = SQRT(etemp + (dtemp - etemp) / AVG);

		switch (G.discipline_state) {
		case STATE_NSET:
			if (option_mask32 & OPT_q) {
				/* We were only asked to set time once.
				 * The clock is precise enough, no need to step.
				 */
				exit(0);
			}
#if USING_INITIAL_FREQ_ESTIMATION
			/* This is the first update received and the frequency
			 * has not been initialized. The first thing to do
			 * is directly measure the oscillator frequency.
			 */
			set_new_values(STATE_FREQ, offset, recv_time);
#else
			set_new_values(STATE_SYNC, offset, recv_time);
#endif
			VERB3 bb_error_msg("transitioning to FREQ, datapoint ignored");
			return 0; /* "leave poll interval as is" */

#if 0 /* this is dead code for now */
		case STATE_FSET:
			/* This is the first update and the frequency
			 * has been initialized. Adjust the phase, but
			 * don't adjust the frequency until the next update.
			 */
			set_new_values(STATE_SYNC, offset, recv_time);
			/* freq_drift remains 0 */
			break;
#endif

#if USING_INITIAL_FREQ_ESTIMATION
		case STATE_FREQ:
			/* since_last_update >= WATCH_THRESHOLD, we waited enough.
			 * Correct the phase and frequency and switch to SYNC state.
			 * freq_drift was already estimated (see code above)
			 */
			set_new_values(STATE_SYNC, offset, recv_time);
#if !USING_KERNEL_PLL_LOOP
			/* Compute freq_drift due to PLL and FLL contributions.
			 *
			 * The FLL and PLL frequency gain constants
			 * depend on the poll interval and Allan
			 * intercept. The FLL is not used below one-half
			 * the Allan intercept. Above that the loop gain
			 * increases in steps to 1 / AVG.
			 */
			if ((1 << G.poll_exp) > ALLAN / 2) {
				etemp = FLL - G.poll_exp;
				if (etemp < AVG)
					etemp = AVG;
				freq_drift += (offset - G.last_update_offset) / (MAXD(since_last_update, ALLAN) * etemp);
			}
			/* For the PLL the integration interval
			 * (numerator) is the minimum of the update
			 * interval and poll interval. This allows
			 * oversampling, but not undersampling.
			 */
			etemp = MIND(since_last_update, (1 << G.poll_exp));
			dtemp = (4 * PLL) << G.poll_exp;
			freq_drift += offset * etemp / SQUARE(dtemp);
#endif
			set_new_values(STATE_SYNC, offset, recv_time);
			break;
		} /* switch */
	if (G.stratum != p->lastpkt_stratum + 1) {
		G.stratum = p->lastpkt_stratum + 1;
		run_script("stratum", offset);
	}

	if (G.discipline_jitter < G_precision_sec)
		G.discipline_jitter = G_precision_sec;
	G.offset_to_jitter_ratio = abs_offset / G.discipline_jitter;

	G.reftime = G.cur_time;
	G.ntp_status = p->lastpkt_status;
	G.refid = p->lastpkt_refid;
	G.rootdelay = p->lastpkt_rootdelay + p->lastpkt_delay;
	dtemp = p->filter_jitter; // SQRT(SQUARE(p->filter_jitter) + SQUARE(G.cluster_jitter));
	dtemp += MAXD(p->filter_dispersion + FREQ_TOLERANCE * (G.cur_time - p->lastpkt_recv_time) + abs_offset, MINDISP);
	G.rootdisp = p->lastpkt_rootdisp + dtemp;
	VERB3 bb_error_msg("updating leap/refid/reftime/rootdisp from peer %s", p->p_dotted);
	/* We are in STATE_SYNC now, but did not do adjtimex yet.
	 * (Any other state does not reach this, they all return earlier)
	 * By this time, freq_drift and offset are set
	 * to values suitable for adjtimex.
	 */
#if !USING_KERNEL_PLL_LOOP
	/* Calculate the new frequency drift and frequency stability (wander).
	 * Compute the clock wander as the RMS of exponentially weighted
	 * frequency differences. This is not used directly, but can,
	 * along with the jitter, be a highly useful monitoring and
	 * debugging tool.
	 */
	dtemp = G.discipline_freq_drift + freq_drift;
	G.discipline_freq_drift = MAXD(MIND(MAXDRIFT, dtemp), -MAXDRIFT);
	etemp = SQUARE(G.discipline_wander);
	dtemp = SQUARE(dtemp);
	G.discipline_wander = SQRT(etemp + (dtemp - etemp) / AVG);

	VERB3 bb_error_msg("discipline freq_drift=%.9f(int:%ld corr:%e) wander=%f",
		G.discipline_freq_drift,
		(long)(G.discipline_freq_drift * 65536e6),
		freq_drift,
		G.discipline_wander);
#endif
	VERB3 {
		memset(&tmx, 0, sizeof(tmx));
		if (adjtimex(&tmx) < 0)
			bb_perror_msg_and_die("adjtimex");
		bb_error_msg("p adjtimex freq:%ld offset:%+ld constant:%ld status:0x%x",
			tmx.freq, tmx.offset, tmx.constant, tmx.status);
	}

	memset(&tmx, 0, sizeof(tmx));
#if 0
//doesn't work, offset remains 0 (!) in kernel:
//ntpd: set adjtimex freq:1786097 tmx.offset:77487
//ntpd: prev adjtimex freq:1786097 tmx.offset:0
//ntpd: cur adjtimex freq:1786097 tmx.offset:0
	tmx.modes = ADJ_FREQUENCY | ADJ_OFFSET;
	/* 65536 is one ppm */
	tmx.freq = G.discipline_freq_drift * 65536e6;
#endif
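	/* Arithmetic example: a drift of +12.5 ppm corresponds to
	 * tmx.freq = 12.5 * 65536 = 819200; the reverse conversion,
	 * G.kernel_freq_drift = tmx.freq / 65536, appears below.
	 */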
	tmx.modes = ADJ_OFFSET | ADJ_STATUS | ADJ_TIMECONST;// | ADJ_MAXERROR | ADJ_ESTERROR;
	tmx.offset = (offset * 1000000); /* usec */
	tmx.status = STA_PLL;
	if (G.ntp_status & LI_PLUSSEC)
		tmx.status |= STA_INS;
	if (G.ntp_status & LI_MINUSSEC)
		tmx.status |= STA_DEL;

	tmx.constant = G.poll_exp - 4;
	/*
	 * The below if statement should be unnecessary, but...
	 * It looks like Linux kernel's PLL is far too gentle in changing
	 * tmx.freq in response to clock offset. Offset keeps growing
	 * and eventually we fall back to smaller poll intervals.
	 * We can make correction more aggressive (about x2) by supplying
	 * PLL time constant which is one less than the real one.
	 * To be on a safe side, let's do it only if offset is significantly
	 * larger than jitter.
	 */
	if (tmx.constant > 0 && G.offset_to_jitter_ratio >= TIMECONST_HACK_GATE)
		tmx.constant--;

	//tmx.esterror = (uint32_t)(clock_jitter * 1e6);
	//tmx.maxerror = (uint32_t)((sys_rootdelay / 2 + sys_rootdisp) * 1e6);
	rc = adjtimex(&tmx);
	if (rc < 0)
		bb_perror_msg_and_die("adjtimex");
	/* NB: here kernel returns constant == G.poll_exp, not == G.poll_exp - 4.
	 * Not sure why. Perhaps it is normal.
	 */
	VERB3 bb_error_msg("adjtimex:%d freq:%ld offset:%+ld status:0x%x",
		rc, tmx.freq, tmx.offset, tmx.status);
	G.kernel_freq_drift = tmx.freq / 65536;
	VERB2 bb_error_msg("update from:%s offset:%+f jitter:%f clock drift:%+.3fppm tc:%d",
		p->p_dotted, offset, G.discipline_jitter, (double)tmx.freq / 65536, (int)tmx.constant);

	return 1; /* "ok to increase poll interval" */
}
/*
 * We've got a new reply packet from a peer, process it
 * (helpers first)
 */
static unsigned
retry_interval(void)
{
	/* Local problem, want to retry soon */
	unsigned interval, r;
	interval = RETRY_INTERVAL;
	r = random();
	interval += r % (unsigned)(RETRY_INTERVAL / 4);
	VERB3 bb_error_msg("chose retry interval:%u", interval);
	return interval;
}
static unsigned
poll_interval(int exponent)
{
	unsigned interval, r;
	exponent = G.poll_exp + exponent;
	if (exponent < 0)
		exponent = 0;
	interval = 1 << exponent;
	r = random();
	interval += ((r & (interval-1)) >> 4) + ((r >> 8) & 1); /* + 1/16 of interval, max */
	VERB3 bb_error_msg("chose poll interval:%u (poll_exp:%d exp:%d)", interval, G.poll_exp, exponent);
	return interval;
}
static NOINLINE void
recv_and_process_peer_pkt(peer_t *p)
{
	int         rc;
	ssize_t     size;
	msg_t       msg;
	double      T1, T2, T3, T4;
	unsigned    interval;
	datapoint_t *datapoint;
	peer_t      *q;

	/* We can recvfrom here and check from.IP, but some multihomed
	 * ntp servers reply from their *other IP*.
	 * TODO: maybe we should check at least what we can: from.port == 123?
	 */
	size = recv(p->p_fd, &msg, sizeof(msg), MSG_DONTWAIT);
	if (size == -1) {
		bb_perror_msg("recv(%s) error", p->p_dotted);
		if (errno == EHOSTUNREACH || errno == EHOSTDOWN
		 || errno == ENETUNREACH || errno == ENETDOWN
		 || errno == ECONNREFUSED || errno == EADDRNOTAVAIL
		) {
//TODO: always do this?
			interval = retry_interval();
			goto set_next_and_close_sock;
		}
		xfunc_die();
	}

	if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) {
		bb_error_msg("malformed packet received from %s", p->p_dotted);
		return;
	}

	if (msg.m_orgtime.int_partl != p->p_xmt_msg.m_xmttime.int_partl
	 || msg.m_orgtime.fractionl != p->p_xmt_msg.m_xmttime.fractionl
	) {
		/* Somebody else's packet */
		return;
	}

	if ((msg.m_status & LI_ALARM) == LI_ALARM
	 || msg.m_stratum == 0
	 || msg.m_stratum > NTP_MAXSTRATUM
	) {
// TODO: stratum 0 responses may have commands in 32-bit m_refid field:
// "DENY", "RSTR" - peer does not like us at all
// "RATE" - peer is overloaded, reduce polling freq
		interval = poll_interval(0);
		bb_error_msg("reply from %s: not synced, next query in %us", p->p_dotted, interval);
		goto set_next_and_close_sock;
	}

//	/* Verify valid root distance */
//	if (msg.m_rootdelay / 2 + msg.m_rootdisp >= MAXDISP || p->lastpkt_reftime > msg.m_xmt)
//		return; /* invalid header values */

	p->lastpkt_status = msg.m_status;
	p->lastpkt_stratum = msg.m_stratum;
	p->lastpkt_rootdelay = sfp_to_d(msg.m_rootdelay);
	p->lastpkt_rootdisp = sfp_to_d(msg.m_rootdisp);
	p->lastpkt_refid = msg.m_refid;
	/*
	 * From RFC 2030 (with a correction to the delay math):
	 *
	 * Timestamp Name          ID   When Generated
	 * ------------------------------------------------------------
	 * Originate Timestamp     T1   time request sent by client
	 * Receive Timestamp       T2   time request received by server
	 * Transmit Timestamp      T3   time reply sent by server
	 * Destination Timestamp   T4   time reply received by client
	 *
	 * The roundtrip delay and local clock offset are defined as
	 *
	 * delay = (T4 - T1) - (T3 - T2); offset = ((T2 - T1) + (T3 - T4)) / 2
	 */
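	/* Worked example (illustrative, client ~0.1 sec behind server):
	 * T1 = 10.000, T2 = 10.100, T3 = 10.102, T4 = 10.008:
	 *   delay  = (10.008 - 10.000) - (10.102 - 10.100) = 0.006
	 *   offset = ((10.100 - 10.000) + (10.102 - 10.008)) / 2 = +0.097
	 */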
	T1 = p->p_xmttime;
	T2 = lfp_to_d(msg.m_rectime);
	T3 = lfp_to_d(msg.m_xmttime);
	T4 = G.cur_time;

	p->lastpkt_recv_time = T4;
	VERB5 bb_error_msg("%s->lastpkt_recv_time=%f", p->p_dotted, p->lastpkt_recv_time);

	p->datapoint_idx = p->reachable_bits ? (p->datapoint_idx + 1) % NUM_DATAPOINTS : 0;
	datapoint = &p->filter_datapoint[p->datapoint_idx];
	datapoint->d_recv_time = T4;
	datapoint->d_offset = ((T2 - T1) + (T3 - T4)) / 2;
	/* The delay calculation is a special case. In cases where the
	 * server and client clocks are running at different rates and
	 * with very fast networks, the delay can appear negative. In
	 * order to avoid violating the Principle of Least Astonishment,
	 * the delay is clamped not less than the system precision.
	 */
	p->lastpkt_delay = (T4 - T1) - (T3 - T2);
	if (p->lastpkt_delay < G_precision_sec)
		p->lastpkt_delay = G_precision_sec;
	datapoint->d_dispersion = LOG2D(msg.m_precision_exp) + G_precision_sec;
	if (!p->reachable_bits) {
		/* 1st datapoint ever - replicate offset in every element */
		int i;
		for (i = 0; i < NUM_DATAPOINTS; i++) {
			p->filter_datapoint[i].d_offset = datapoint->d_offset;
		}
	}

	p->reachable_bits |= 1;
	if ((MAX_VERBOSE && G.verbose) || (option_mask32 & OPT_w)) {
		bb_error_msg("reply from %s: reach 0x%02x offset %+f delay %f status 0x%02x strat %d refid 0x%08x rootdelay %f",
			p->p_dotted,
			p->reachable_bits,
			datapoint->d_offset,
			p->lastpkt_delay,
			p->lastpkt_status,
			p->lastpkt_stratum,
			p->lastpkt_refid,
			p->lastpkt_rootdelay
			/* not shown: m_ppoll, m_precision_exp, m_rootdisp,
			 * m_reftime, m_orgtime, m_rectime, m_xmttime
			 */
		);
	}
	/* Muck with statistics and update the clock */
	filter_datapoints(p);
	q = select_and_cluster();
	rc = -1;
	if (q) {
		rc = 0;
		if (!(option_mask32 & OPT_w)) {
			rc = update_local_clock(q);
			/* If drift is dangerously large, immediately
			 * drop poll interval one step down.
			 */
			if (fabs(q->filter_offset) >= POLLDOWN_OFFSET) {
				VERB3 bb_error_msg("offset:%+f > POLLDOWN_OFFSET", q->filter_offset);
				goto poll_down;
			}
		}
	}
	/* else: no peer selected, rc = -1: we want to poll more often */
	/* Adjust the poll interval by comparing the current offset
	 * with the clock jitter. If the offset is less than
	 * the clock jitter times a constant, then the averaging interval
	 * is increased, otherwise it is decreased. A bit of hysteresis
	 * helps calm the dance. Works best using burst mode.
	 */
	if (rc > 0 && G.offset_to_jitter_ratio <= POLLADJ_GATE) {
		/* was += G.poll_exp but it is a bit
		 * too optimistic for my taste at high poll_exp's */
		G.polladj_count += MINPOLL;
		if (G.polladj_count > POLLADJ_LIMIT) {
			G.polladj_count = 0;
			if (G.poll_exp < MAXPOLL) {
				G.poll_exp++;
				VERB3 bb_error_msg("polladj: discipline_jitter:%f ++poll_exp=%d",
					G.discipline_jitter, G.poll_exp);
			}
		} else {
			VERB3 bb_error_msg("polladj: incr:%d", G.polladj_count);
		}
	} else {
		G.polladj_count -= G.poll_exp * 2;
		if (G.polladj_count < -POLLADJ_LIMIT || G.poll_exp >= BIGPOLL) {
 poll_down:
			G.polladj_count = 0;
			if (G.poll_exp > MINPOLL) {
				llist_t *item;

				G.poll_exp--;
				/* Correct p->next_action_time in each peer
				 * which waits for sending, so that they send earlier.
				 * Old pp->next_action_time are on the order
				 * of t + (1 << old_poll_exp) + small_random,
				 * we simply need to subtract ~half of that.
				 */
				for (item = G.ntp_peers; item != NULL; item = item->link) {
					peer_t *pp = (peer_t *) item->data;
					if (pp->p_fd < 0)
						pp->next_action_time -= (1 << G.poll_exp);
				}
				VERB3 bb_error_msg("polladj: discipline_jitter:%f --poll_exp=%d",
					G.discipline_jitter, G.poll_exp);
			}
		} else {
			VERB3 bb_error_msg("polladj: decr:%d", G.polladj_count);
		}
	}
	/* Decide when to send new query for this peer */
	interval = poll_interval(0);

 set_next_and_close_sock:
	set_next(p, interval);
	/* We do not expect any more packets from this peer for now.
	 * Closing the socket informs kernel about it.
	 * We open a new socket when we send a new query.
	 */
	close(p->p_fd);
	p->p_fd = -1;
}
#if ENABLE_FEATURE_NTPD_SERVER
static NOINLINE void
recv_and_process_client_pkt(void /*int fd*/)
{
	ssize_t size;
	len_and_sockaddr *to;
	struct sockaddr *from;
	msg_t msg;
	uint8_t query_status;
	l_fixedpt_t query_xmttime;

	to = get_sock_lsa(G.listen_fd);
	from = xzalloc(to->len);

	size = recv_from_to(G.listen_fd, &msg, sizeof(msg), MSG_DONTWAIT, from, &to->u.sa, to->len);
	if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) {
		char *addr;
		if (size < 0) {
			if (errno == EAGAIN)
				goto bail;
			bb_perror_msg_and_die("recv");
		}
		addr = xmalloc_sockaddr2dotted_noport(from);
		bb_error_msg("malformed packet received from %s: size %u", addr, (int)size);
		free(addr);
		goto bail;
	}
	query_status = msg.m_status;
	query_xmttime = msg.m_xmttime;

	/* Build a reply packet */
	memset(&msg, 0, sizeof(msg));
	msg.m_status = G.stratum < MAXSTRAT ? G.ntp_status : LI_ALARM;
	msg.m_status |= (query_status & VERSION_MASK);
	msg.m_status |= ((query_status & MODE_MASK) == MODE_CLIENT) ?
			MODE_SERVER : MODE_SYM_PAS;
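	/* Example (illustrative): an NTPv4 client query carries m_status
	 * 0x23 (version 4 in bits 3-5, mode 3 = client); the reply keeps
	 * the version bits and answers as mode 4 (server): 0x24, with our
	 * current leap indicator in the top two bits.
	 */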
	msg.m_stratum = G.stratum;
	msg.m_ppoll = G.poll_exp;
	msg.m_precision_exp = G_precision_exp;
	/* this time was obtained between poll() and recv() */
	msg.m_rectime = d_to_lfp(G.cur_time);
	msg.m_xmttime = d_to_lfp(gettime1900d()); /* this instant */
	if (G.peer_cnt == 0) {
		/* we have no peers: "stratum 1 server" mode. reftime = our own time */
		G.reftime = G.cur_time;
	}
	msg.m_reftime = d_to_lfp(G.reftime);
	msg.m_orgtime = query_xmttime;
	msg.m_rootdelay = d_to_sfp(G.rootdelay);
//simple code does not do this, fix simple code!
	msg.m_rootdisp = d_to_sfp(G.rootdisp);
	//version = (query_status & VERSION_MASK); /* ... >> VERSION_SHIFT - done below instead */
	msg.m_refid = G.refid; // (version > (3 << VERSION_SHIFT)) ? G.refid : G.refid3;

	/* We reply from the local address the packet was sent to,
	 * this makes to/from look swapped here: */
	do_sendto(G.listen_fd,
		/*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len,
		&msg, size);
1837 /* Upstream ntpd's options:
 * -4     Force DNS resolution of host names to the IPv4 namespace.
 * -6     Force DNS resolution of host names to the IPv6 namespace.
 * -a     Require cryptographic authentication for broadcast client,
 *        multicast client and symmetric passive associations.
 *        This is the default.
 * -A     Do not require cryptographic authentication for broadcast client,
 *        multicast client and symmetric passive associations.
 *        This is almost never a good idea.
 * -b     Enable the client to synchronize to broadcast servers.
 * -c conffile
 *        Specify the name and path of the configuration file,
 *        default /etc/ntp.conf
 * -d     Specify debugging mode. This option may occur more than once,
 *        with each occurrence indicating greater detail of display.
 * -D level
 *        Specify debugging level directly.
 * -f driftfile
 *        Specify the name and path of the frequency file.
 *        This is the same operation as the "driftfile FILE"
 *        configuration command.
 * -g     Normally, ntpd exits with a message to the system log
 *        if the offset exceeds the panic threshold, which is 1000 s
 *        by default. This option allows the time to be set to any value
 *        without restriction; however, this can happen only once.
 *        If the threshold is exceeded after that, ntpd will exit
 *        with a message to the system log. This option can be used
 *        with the -q and -x options. See the tinker command for other options.
 * -i jaildir
 *        Chroot the server to the directory jaildir. This option also implies
 *        that the server attempts to drop root privileges at startup
 *        (otherwise, chroot gives very little additional security).
 *        You may need to also specify a -u option.
 * -k keyfile
 *        Specify the name and path of the symmetric key file,
 *        default /etc/ntp/keys. This is the same operation
 *        as the "keys FILE" configuration command.
 * -l logfile
 *        Specify the name and path of the log file. The default
 *        is the system log file. This is the same operation as
 *        the "logfile FILE" configuration command.
 * -L     Do not listen to virtual IPs. The default is to listen.
 * -n     Do not daemonize.
 * -N     To the extent permitted by the operating system,
 *        run the ntpd at the highest priority.
 * -p pidfile
 *        Specify the name and path of the file used to record the ntpd
 *        process ID. This is the same operation as the "pidfile FILE"
 *        configuration command.
 * -P priority
 *        To the extent permitted by the operating system,
 *        run the ntpd at the specified priority.
 * -q     Exit the ntpd just after the first time the clock is set.
 *        This behavior mimics that of the ntpdate program, which is
 *        to be retired. The -g and -x options can be used with this option.
 *        Note: The kernel time discipline is disabled with this option.
 * -r broadcastdelay
 *        Specify the default propagation delay from the broadcast/multicast
 *        server to this client. This is necessary only if the delay
 *        cannot be computed automatically by the protocol.
 * -s statsdir
 *        Specify the directory path for files created by the statistics
 *        facility. This is the same operation as the "statsdir DIR"
 *        configuration command.
 * -t key
 *        Add a key number to the trusted key list. This option can occur
 *        more than once.
 * -u user[:group]
 *        Specify a user, and optionally a group, to switch to.
 * -v variable
 * -V variable
 *        Add a system variable listed by default.
 * -x     Normally, the time is slewed if the offset is less than the step
 *        threshold, which is 128 ms by default, and stepped if above
 *        the threshold. This option sets the threshold to 600 s, which is
 *        well within the accuracy window to set the clock manually.
 *        Note: since the slew rate of typical Unix kernels is limited
 *        to 0.5 ms/s, each second of adjustment requires an amortization
 *        interval of 2000 s. Thus, an adjustment as much as 600 s
 *        will take almost 14 days to complete. This option can be used
 *        with the -g and -q options. See the tinker command for other options.
 *        Note: The kernel time discipline is disabled with this option.
 *
 * (A worked check of the -x slew arithmetic follows this comment.)
 */
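/* A quick back-of-the-envelope check of the -x arithmetic above
 * (illustrative sketch only, not part of ntpd):
 */
#if 0 /* example, not compiled */
static double slew_duration_days(double offset_sec)
{
	const double slew_rate = 0.0005; /* typical kernel slew limit: 0.5 ms per second */
	double amortization_sec = offset_sec / slew_rate; /* 600 s -> 1,200,000 s */
	return amortization_sec / (24 * 60 * 60); /* 1,200,000 s ~= 13.9 days */
}
#endif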
/* By doing init in a separate function we decrease stack usage
 * in main loop.
 */
static NOINLINE void ntp_init(char **argv)
{
	unsigned opts;
	llist_t *peers;

	srandom(getpid());

	if (getuid())
		bb_error_msg_and_die(bb_msg_you_must_be_root);

	/* Set some globals */
	G.stratum = MAXSTRAT;
	if (BURSTPOLL != 0)
		G.poll_exp = BURSTPOLL; /* speeds up initial sync */
	G.last_script_run = G.reftime = G.last_update_recv_time = gettime1900d(); /* sets G.cur_time too */

	/* Parse options */
	peers = NULL;
	opt_complementary = "dd:p::wn"; /* d: counter; p: list; -w implies -n */
	opts = getopt32(argv,
			"nqNx" /* compat */
			"wp:S:"IF_FEATURE_NTPD_SERVER("l") /* NOT compat */
			"d" /* compat */
			"46aAbgL", /* compat, ignored */
			&peers, &G.script_name, &G.verbose);
	if (!(opts & (OPT_p|OPT_l)))
		bb_show_usage();
//	if (opts & OPT_x) /* disable stepping, only slew is allowed */
//		G.time_was_stepped = 1;
	while (peers)
		add_peers(llist_pop(&peers));
	/* -l but no peers: "stratum 1 server" mode */

	if (!(opts & OPT_n)) {
		bb_daemonize_or_rexec(DAEMON_DEVNULL_STDIO, argv);
		logmode = LOGMODE_NONE;
	}
#if ENABLE_FEATURE_NTPD_SERVER
	G.listen_fd = -1;
	if (opts & OPT_l) {
		G.listen_fd = create_and_bind_dgram_or_die(NULL, 123);
		socket_want_pktinfo(G.listen_fd);
		setsockopt(G.listen_fd, IPPROTO_IP, IP_TOS, &const_IPTOS_LOWDELAY, sizeof(const_IPTOS_LOWDELAY));
	}
#endif
	/* I hesitate to set -20 prio. -15 should be high enough for timekeeping */
	if (opts & OPT_N)
		setpriority(PRIO_PROCESS, 0, -15);

	/* If network is up, synchronization occurs in ~10 seconds.
	 * We give "ntpd -q" 10 seconds to get first reply,
	 * then another 50 seconds to finish syncing.
	 *
	 * I tested ntpd 4.2.6p1 and apparently it never exits
	 * (will try forever), but it does not feel right.
	 * The goal of -q is to act like ntpdate: set time
	 * after a reasonably small period of polling, or fail.
	 */
	if (opts & OPT_q) {
		option_mask32 |= OPT_qq;
		alarm(10);
	}

	bb_signals((1 << SIGTERM) | (1 << SIGINT) | (1 << SIGALRM), record_signo);
	bb_signals((1 << SIGPIPE) | (1 << SIGCHLD), SIG_IGN);
}
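/* The -q deadline relies on a classic pattern: arm alarm(), record the
 * signal in a handler, and let the main loop notice the flag. A minimal
 * standalone sketch of the same idea (hypothetical names, not busybox code):
 */
#if 0 /* example, not compiled */
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t got_sig;

static void record_sig(int sig)
{
	got_sig = sig;
}

static int deadline_demo(void)
{
	signal(SIGALRM, record_sig);
	alarm(10); /* give ourselves 10 seconds */
	while (!got_sig) {
		/* poll sockets, process replies, extend the alarm on progress */
	}
	return got_sig; /* SIGALRM means we ran out of time */
}
#endif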
int ntpd_main(int argc UNUSED_PARAM, char **argv) MAIN_EXTERNALLY_VISIBLE;
int ntpd_main(int argc UNUSED_PARAM, char **argv)
{
#undef G
	struct globals G;
	struct pollfd *pfd;
	peer_t **idx2peer;
	unsigned cnt;

	memset(&G, 0, sizeof(G));
	SET_PTR_TO_GLOBALS(&G);

	ntp_init(argv);

	/* If ENABLE_FEATURE_NTPD_SERVER, + 1 for listen_fd: */
	cnt = G.peer_cnt + ENABLE_FEATURE_NTPD_SERVER;
	idx2peer = xzalloc(sizeof(idx2peer[0]) * cnt);
	pfd = xzalloc(sizeof(pfd[0]) * cnt);
	/* Countdown: we never sync before we sent INITIAL_SAMPLES+1
	 * packets to each peer.
	 * NB: if some peer is not responding, we may end up sending
	 * fewer packets to it and more to other peers.
	 * NB2: sync usually happens using INITIAL_SAMPLES packets,
	 * since last reply does not come back instantaneously.
	 */
	cnt = G.peer_cnt * (INITIAL_SAMPLES + 1);
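	/* Worked example (assuming INITIAL_SAMPLES is defined as 4 earlier
	 * in this file): with 3 peers, cnt starts at 3 * 5 = 15 requests
	 * in total, however they end up distributed among the peers. */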
	while (!bb_got_signal) {
		llist_t *item;
		unsigned i, j;
		int nfds, timeout;
		double nextaction;

		/* Nothing between here and poll() blocks for any significant time */

		nextaction = G.cur_time + 3600;

		i = 0;
#if ENABLE_FEATURE_NTPD_SERVER
		if (G.listen_fd != -1) {
			pfd[0].fd = G.listen_fd;
			pfd[0].events = POLLIN;
			i++;
		}
#endif
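		/* pfd[] layout from here on: slot 0 is the server socket
		 * (when listening), followed by one slot per peer with a
		 * query in flight; idx2peer[] maps poll slots back to peers. */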
		/* Pass over peer list, send requests, time out on receives */
		for (item = G.ntp_peers; item != NULL; item = item->link) {
			peer_t *p = (peer_t *) item->data;

			if (p->next_action_time <= G.cur_time) {
				if (p->p_fd == -1) {
					/* Time to send new req */
					if (--cnt == 0)
						G.initial_poll_complete = 1;
					send_query_to_peer(p);
				} else {
					/* Timed out waiting for reply */
					close(p->p_fd);
					p->p_fd = -1;
					timeout = poll_interval(-2); /* -2: try a bit sooner */
					bb_error_msg("timed out waiting for %s, reach 0x%02x, next query in %us",
							p->p_dotted, p->reachable_bits, timeout);
					set_next(p, timeout);
				}
			}

			if (p->next_action_time < nextaction)
				nextaction = p->next_action_time;

			if (p->p_fd >= 0) {
				/* Wait for reply from this peer */
				pfd[i].fd = p->p_fd;
				pfd[i].events = POLLIN;
				idx2peer[i] = p;
				i++;
			}
		}
		timeout = nextaction - G.cur_time;
		if (timeout < 0)
			timeout = 0;
		timeout++; /* (nextaction - G.cur_time) rounds down, compensating */
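		/* Example: if nextaction - G.cur_time is 2.7 s, the int
		 * conversion truncates it to 2; the increment makes us sleep
		 * 3 s instead of waking 0.7 s before there is anything to do. */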
		/* Here we may block */
		VERB2 {
			if (i > (ENABLE_FEATURE_NTPD_SERVER && G.listen_fd != -1)) {
				/* We wait for at least one reply.
				 * Poll for it, without wasting time for message.
				 * Since replies often come under 1 second, this also
				 * reduces clutter in logs.
				 */
				nfds = poll(pfd, i, 1000);
				if (nfds != 0)
					goto did_poll;
				if (--timeout <= 0)
					goto did_poll;
			}
			bb_error_msg("poll %us, sockets:%u, poll interval:%us", timeout, i, 1 << G.poll_exp);
		}
		nfds = poll(pfd, i, timeout * 1000);
 did_poll:
		gettime1900d(); /* sets G.cur_time */

		if (G.script_name && G.cur_time - G.last_script_run > 11*60) {
			/* Useful for updating battery-backed RTC and such */
			run_script("periodic", G.last_update_offset);
			gettime1900d(); /* sets G.cur_time */
		}
		/* Process any received packets */
		j = 0;
#if ENABLE_FEATURE_NTPD_SERVER
		if (G.listen_fd != -1) {
			if (pfd[0].revents /* & (POLLIN|POLLERR)*/) {
				nfds--;
				recv_and_process_client_pkt(/*G.listen_fd*/);
				gettime1900d(); /* sets G.cur_time */
			}
			j = 1;
		}
#endif
		for (; nfds != 0 && j < i; j++) {
			if (pfd[j].revents /* & (POLLIN|POLLERR)*/) {
				/*
				 * At init, alarm was set to 10 sec.
				 * Now we did get a reply.
				 * Increase timeout to 50 seconds to finish syncing.
				 */
				if (option_mask32 & OPT_qq) {
					option_mask32 &= ~OPT_qq;
					alarm(50);
				}
				nfds--;
				recv_and_process_peer_pkt(idx2peer[j]);
				gettime1900d(); /* sets G.cur_time */
			}
		}
	} /* while (!bb_got_signal) */
	kill_myself_with_sig(bb_got_signal);
}
/*** openntpd-4.6 uses only adjtime, not adjtimex ***/

/*** ntp-4.2.6/ntpd/ntp_loopfilter.c - adjtimex usage ***/
#if 0 /* reference code, not compiled */
static double
direct_freq(double fp_offset)
{
#ifdef KERNEL_PLL
	/*
	 * If the kernel is enabled, we need the residual offset to
	 * calculate the frequency correction.
	 */
	if (pll_control && kern_enable) {
		memset(&ntv, 0, sizeof(ntv));
		ntp_adjtime(&ntv);
#ifdef STA_NANO
		clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
		clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
		drift_comp = FREQTOD(ntv.freq);
	}
#endif /* KERNEL_PLL */
	set_freq((fp_offset - clock_offset) / (current_time - clock_epoch) + drift_comp);
	wander_resid = 0;
	return drift_comp;
}
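/* The estimate above is a straight-line fit: the residual offset
 * accumulated since clock_epoch, divided by the elapsed time, added to
 * the previous drift. Worked example: if the offset grew by 1 ms over
 * the last 64 s, the correction is 0.001/64 ~= 15.6 PPM on top of the
 * old drift_comp. */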
static void
set_freq(double freq) /* frequency update */
{
	char tbuf[80];

	drift_comp = freq;

#ifdef KERNEL_PLL
	/*
	 * If the kernel is enabled, update the kernel frequency.
	 */
	if (pll_control && kern_enable) {
		memset(&ntv, 0, sizeof(ntv));
		ntv.modes = MOD_FREQUENCY;
		ntv.freq = DTOFREQ(drift_comp);
		ntp_adjtime(&ntv);
		snprintf(tbuf, sizeof(tbuf), "kernel %.3f PPM", drift_comp * 1e6);
		report_event(EVNT_FSET, NULL, tbuf);
	} else {
		snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp * 1e6);
		report_event(EVNT_FSET, NULL, tbuf);
	}
#else /* KERNEL_PLL */
	snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp * 1e6);
	report_event(EVNT_FSET, NULL, tbuf);
#endif /* KERNEL_PLL */
}
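/* For reference: the kernel expresses frequency in "scaled PPM" (PPM
 * with a 16-bit binary fraction), while ntpd keeps drift_comp as a plain
 * fraction (s/s). The ntp-4.2.6 conversion macros are, to the best of my
 * reading of its sources (treat the exact definitions as an assumption
 * and check ntp_fp.h):
 */
#if 0 /* example, not compiled */
#define FREQTOD(x) ((x) / 65536e6)          /* scaled PPM -> double, s/s */
#define DTOFREQ(x) ((int32)((x) * 65536e6)) /* double, s/s -> scaled PPM */
/* e.g. a drift of +50 PPM (5e-5 s/s) becomes 50 * 65536 = 3276800 */
#endif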
	/*
	 * This code segment works when clock adjustments are made using
	 * precision time kernel support and the ntp_adjtime() system
	 * call. This support is available in Solaris 2.6 and later,
	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
	 * DECstation 5000/240 and Alpha AXP, additional kernel
	 * modifications provide a true microsecond clock and nanosecond
	 * clock, respectively.
	 *
	 * Important note: The kernel discipline is used only if the
	 * step threshold is less than 0.5 s, as anything higher can
	 * lead to overflow problems. This might occur if some misguided
	 * lad set the step threshold to something ridiculous.
	 */
	if (pll_control && kern_enable) {

#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | MOD_STATUS | MOD_TIMECONST)
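/* Why the 0.5 s limit mentioned above: with STA_NANO, ntv.offset is an
 * int32 carrying nanoseconds, so it can only represent offsets up to
 * 2^31 ns ~= 2.147 s; capping the step threshold at 0.5 s leaves the
 * PLL comfortable headroom below that hard limit. */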
		/*
		 * We initialize the structure for the ntp_adjtime()
		 * system call. We have to convert everything to
		 * microseconds or nanoseconds first. Do not update the
		 * system variables if the ext_enable flag is set. In
		 * this case, the external clock driver will update the
		 * variables, which will be read later by the local
		 * clock driver. Afterwards, remember the time and
		 * frequency offsets for jitter and stability values and
		 * to update the frequency file.
		 */
		memset(&ntv, 0, sizeof(ntv));
		if (ext_enable) {
			ntv.modes = MOD_STATUS;
		} else {
#ifdef STA_NANO
			ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
			ntv.modes = MOD_BITS;
#endif /* STA_NANO */
			if (clock_offset < 0)
				dtemp = -.5;
			else
				dtemp = .5;
#ifdef STA_NANO
			ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
			ntv.constant = sys_poll;
#else /* STA_NANO */
			ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
			ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
			ntv.esterror = (u_int32)(clock_jitter * 1e6);
			ntv.maxerror = (u_int32)((sys_rootdelay / 2 + sys_rootdisp) * 1e6);
			ntv.status = STA_PLL;
			/*
			 * Enable/disable the PPS if requested.
			 */
			if (hardpps_enable) {
				if (!(pll_status & STA_PPSTIME))
					report_event(EVNT_KERN,
							NULL, "PPS enabled");
				ntv.status |= STA_PPSTIME | STA_PPSFREQ;
			} else {
				if (pll_status & STA_PPSTIME)
					report_event(EVNT_KERN,
							NULL, "PPS disabled");
				ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
			}
			if (sys_leap == LEAP_ADDSECOND)
				ntv.status |= STA_INS;
			else if (sys_leap == LEAP_DELSECOND)
				ntv.status |= STA_DEL;
		}
		/*
		 * Pass the stuff to the kernel. If it squeals, turn off
		 * the pps. In any case, fetch the kernel offset,
		 * frequency and jitter.
		 */
		if (ntp_adjtime(&ntv) == TIME_ERROR) {
			if (!(ntv.status & STA_PPSSIGNAL))
				report_event(EVNT_KERN, NULL, "PPS no signal");
		}
		pll_status = ntv.status;
#ifdef STA_NANO
		clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
		clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
		clock_frequency = FREQTOD(ntv.freq);
		/*
		 * If the kernel PPS is lit, monitor its performance.
		 */
		if (ntv.status & STA_PPSTIME) {
#ifdef STA_NANO
			clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
			clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
		}
#if defined(STA_NANO) && NTP_API == 4
		/*
		 * If the TAI changes, update the kernel TAI.
		 */
		if (loop_tai != sys_tai) {
			loop_tai = sys_tai;
			ntv.modes = MOD_TAI;
			ntv.constant = sys_tai;
			ntp_adjtime(&ntv);
		}
#endif /* STA_NANO */
	}
#endif /* KERNEL_PLL */
#endif /* 0 (reference code, not compiled) */