 * NTP client/server, based on OpenNTPD 3.9p1
 *
 * Busybox port author: Adam Tkac (C) 2009 <vonsch@gmail.com>
 *
 * OpenNTPd 3.9p1 copyright holders:
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
 *
 * OpenNTPd code is licensed under ISC-style licence:
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER
 * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 ***********************************************************************
 * Parts of the OpenNTPD clock synchronization code are replaced by
 * code which is based on ntp-4.2.6, which carries the following
 * copyright notice:
 *
 * Copyright (c) University of Delaware 1992-2009
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose with or without fee is hereby
 * granted, provided that the above copyright notice appears in all
 * copies and that both the copyright notice and this permission
 * notice appear in supporting documentation, and that the name
 * University of Delaware not be used in advertising or publicity
 * pertaining to distribution of the software without specific,
 * written prior permission. The University of Delaware makes no
 * representations about the suitability this software for any
 * purpose. It is provided "as is" without express or implied warranty.
 ***********************************************************************
//config:	select PLATFORM_LINUX
//config:	help
//config:	The NTP client/server daemon.
//config:
//config:config FEATURE_NTPD_SERVER
//config:	bool "Make ntpd usable as an NTP server"
//config:	depends on NTPD
//config:	help
//config:	Make ntpd usable as an NTP server. If you disable this option,
//config:	ntpd will be usable only as an NTP client.
//config:
//config:config FEATURE_NTPD_CONF
//config:	bool "Make ntpd understand /etc/ntp.conf"
//config:	depends on NTPD
//config:	help
//config:	Make ntpd look in /etc/ntp.conf for peers. Only "server address"
//config:	is supported.
//applet:IF_NTPD(APPLET(ntpd, BB_DIR_USR_SBIN, BB_SUID_DROP))

//kbuild:lib-$(CONFIG_NTPD) += ntpd.o

//usage:#define ntpd_trivial_usage
//usage:	"[-dnqNw"IF_FEATURE_NTPD_SERVER("l -I IFACE")"] [-S PROG] [-p PEER]..."
//usage:#define ntpd_full_usage "\n\n"
//usage:	"NTP client/server\n"
//usage:	"\n	-d	Verbose"
//usage:	"\n	-n	Do not daemonize"
//usage:	"\n	-q	Quit after clock is set"
//usage:	"\n	-N	Run at high priority"
//usage:	"\n	-w	Do not set time (only query peers), implies -n"
//usage:	"\n	-S PROG	Run PROG after stepping time, stratum change, and every 11 mins"
//usage:	"\n	-p PEER	Obtain time from PEER (may be repeated)"
//usage:	IF_FEATURE_NTPD_CONF(
//usage:	"\n		If -p is not given, 'server HOST' lines"
//usage:	"\n		from /etc/ntp.conf are used"
//usage:	)
//usage:	IF_FEATURE_NTPD_SERVER(
//usage:	"\n	-l	Also run as server on port 123"
//usage:	"\n	-I IFACE	Bind server to IFACE, implies -l"
//usage:	)

// The -l and -p options are not compatible with "standard" ntpd:
// it has them as "-l logfile" and "-p pidfile".
// -S and -w are not compatible either; "standard" ntpd has no such options.
#include <netinet/ip.h> /* For IPTOS_LOWDELAY definition */
#include <sys/resource.h> /* setpriority */
#include <sys/timex.h>
#ifndef IPTOS_LOWDELAY
# define IPTOS_LOWDELAY 0x10
#endif

/* Verbosity control (max level of -dddd options accepted).
 * max 6 is very talkative (and bloated). 3 is a non-bloated,
 * production level setting.
 */
#define MAX_VERBOSE 3
/* High-level description of the algorithm:
 *
 * We start running with a very small poll_exp, BURSTPOLL,
 * in order to quickly accumulate INITIAL_SAMPLES datapoints
 * for each peer. Then, time is stepped if the offset is larger
 * than STEP_THRESHOLD, otherwise it isn't; either way, we enlarge
 * poll_exp to MINPOLL and enter the frequency measurement step:
 * we collect new datapoints but ignore them for WATCH_THRESHOLD
 * seconds. After WATCH_THRESHOLD seconds we look at the accumulated
 * offset and estimate frequency drift.
 *
 * (The frequency measurement step seems not to be strictly needed;
 * it is conditionally disabled with USING_INITIAL_FREQ_ESTIMATION
 * set to 0.)
 *
 * After this, we enter "steady state": we collect a datapoint and
 * select the best peer; if this datapoint is not a new one
 * (IOW: if this datapoint isn't for the selected peer), we sleep
 * and collect another one; otherwise, we use its offset to update
 * the frequency drift; if the offset is somewhat large, we reduce
 * poll_exp, otherwise we increase poll_exp.
 *
 * If the offset is larger than STEP_THRESHOLD, which shouldn't normally
 * happen, we assume that something "bad" happened (the computer
 * was hibernated, someone set a totally wrong date, etc),
 * then the time is stepped, all datapoints are discarded,
 * and we go back to steady state.
 *
 * Some changes were made to speed up re-syncing after our clock goes bad
 * (tested by suspending my laptop):
 * - if a largish offset (>= STEP_THRESHOLD == 1 sec) is seen
 *   from a peer, schedule the next query for this peer soon,
 *   without drastically lowering the poll interval for everybody.
 *   This makes us collect enough data for a step much faster:
 *   e.g. at poll = 10 (1024 secs), a step was done within 5 minutes
 *   after the first reply which indicated that our clock is 14 seconds off.
 * - on step, do not discard the d_dispersion data of the existing datapoints,
 *   and do not clear reachable_bits. This prevents discarding the first ~8
 *   datapoints after the step.
 */
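/* Condensed lifecycle sketch (illustrative summary of the above, not in
 * the original source):
 *   BURSTPOLL: gather INITIAL_SAMPLES datapoints per peer, step if needed
 *   -> MINPOLL: optional frequency measurement phase (WATCH_THRESHOLD secs)
 *   -> steady state: small offsets slowly raise poll_exp toward MAXPOLL,
 *      largish offsets lower it; |offset| > STEP_THRESHOLD steps again.
 */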
#define INITIAL_SAMPLES    4    /* how many samples we want for init */
#define BAD_DELAY_GROWTH   4    /* drop packet if its delay grew by more than this */

#define RETRY_INTERVAL    32    /* on send/recv error, retry in N secs (needs to be a power of 2) */
#define NOREPLY_INTERVAL 512    /* sent, but got no reply: cap next query by this many seconds */
#define RESPONSE_INTERVAL 16    /* wait for reply up to N secs */
#define HOSTNAME_INTERVAL  5    /* hostname lookup failed; wait N secs before next try */
/* Step threshold (sec). std ntpd uses 0.128.
 */
#define STEP_THRESHOLD  1
/* Slew threshold (sec): adjtimex() won't accept offsets larger than this.
 * Using an exact power of 2 (1/8) results in smaller code.
 */
#define SLEW_THRESHOLD  0.125
/* Stepout threshold (sec). std ntpd uses 900 (11 mins (!)) */
#define WATCH_THRESHOLD 128
/* NB: set WATCH_THRESHOLD to ~60 when debugging to save time */
//UNUSED: #define PANIC_THRESHOLD 1000 /* panic threshold (sec) */

/*
 * If we got |offset| > BIGOFF from a peer, cap the next query interval
 * for this peer by this many seconds:
 */
#define BIGOFF          STEP_THRESHOLD
#define BIGOFF_INTERVAL (1 << 7) /* 128 s */
#define FREQ_TOLERANCE  0.000015 /* frequency tolerance (15 PPM) */
#define BURSTPOLL       0        /* initial poll */
#define MINPOLL         5        /* minimum poll interval. std ntpd uses 6 (6: 64 sec) */
/* If offset > discipline_jitter * POLLADJ_GATE, and poll interval is > 2^BIGPOLL,
 * then it is decreased _at once_. (If <= 2^BIGPOLL, it will be decreased _eventually_).
 */
#define BIGPOLL         9        /* 2^9 sec ~= 8.5 min */
#define MAXPOLL         12       /* maximum poll interval (12: 1.1h, 17: 36.4h). std ntpd uses 17 */
/* Actively lower poll when we see such big offsets.
 * With SLEW_THRESHOLD = 0.125, it means we try to sync more aggressively
 * if the offset increases over ~0.04 sec.
 */
//#define POLLDOWN_OFFSET (SLEW_THRESHOLD / 3)
#define MINDISP         0.01     /* minimum dispersion (sec) */
#define MAXDISP         16       /* maximum dispersion (sec) */
#define MAXSTRAT        16       /* maximum stratum (infinity metric) */
#define MAXDIST         1        /* distance threshold (sec) */
#define MIN_SELECTED    1        /* minimum intersection survivors */
#define MIN_CLUSTERED   3        /* minimum cluster survivors */

#define MAXDRIFT        0.000500 /* frequency drift we can correct (500 PPM) */
/* Poll-adjust threshold.
 * When we see that the offset is small enough compared to the discipline
 * jitter, we grow a counter: += MINPOLL. When the counter goes over
 * POLLADJ_LIMIT, we poll_exp++. If the offset isn't small,
 * counter -= poll_exp*2, and when it goes below -POLLADJ_LIMIT,
 * we poll_exp--.
 * (Bumped from 30 to 40 since otherwise I often see poll_exp going *2* steps down.)
 */
#define POLLADJ_LIMIT 40
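/* Worked example (illustrative, not from the original source): with
 * MINPOLL = 5 and POLLADJ_LIMIT = 40, about eight consecutive "small
 * offset" updates (counter += 5 each) are needed before poll_exp++.
 * A single "large offset" update at poll_exp = 10 subtracts 20, so
 * backing off toward shorter polls happens much faster than ramping up.
 */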
/* If offset < discipline_jitter * POLLADJ_GATE, then we decide to increase
 * the poll interval (we think we can't improve timekeeping
 * by staying at a smaller poll).
 */
#define POLLADJ_GATE 4
#define TIMECONST_HACK_GATE 2
/* Compromise Allan intercept (sec). doc uses 1500, std ntpd uses 512 */
/* FLL loop gain [why does it depend on MAXPOLL??] */
#define FLL (MAXPOLL + 1)
/* Parameter averaging constant */
	NTP_MSGSIZE_NOAUTH = 48,
	NTP_MSGSIZE        = (NTP_MSGSIZE_NOAUTH + 4 + NTP_DIGESTSIZE),

	MODE_MASK    = (7 << 0),
	VERSION_MASK = (7 << 3),

	/* Leap Second Codes (high order two bits of m_status) */
	LI_NOWARNING = (0 << 6),    /* no warning */
	LI_PLUSSEC   = (1 << 6),    /* add a second (61 seconds) */
	LI_MINUSSEC  = (2 << 6),    /* minus a second (59 seconds) */
	LI_ALARM     = (3 << 6),    /* alarm condition */

	MODE_RES0      = 0,    /* reserved */
	MODE_SYM_ACT   = 1,    /* symmetric active */
	MODE_SYM_PAS   = 2,    /* symmetric passive */
	MODE_CLIENT    = 3,    /* client */
	MODE_SERVER    = 4,    /* server */
	MODE_BROADCAST = 5,    /* broadcast */
	MODE_RES1      = 6,    /* reserved for NTP control message */
	MODE_RES2      = 7,    /* reserved for private use */

//TODO: better base selection
#define OFFSET_1900_1970 2208988800UL /* 1970 - 1900 in seconds */

#define NUM_DATAPOINTS 8
	uint8_t     m_status;        /* status of local clock and leap info */
	uint8_t     m_stratum;
	uint8_t     m_ppoll;         /* poll value */
	int8_t      m_precision_exp;
	s_fixedpt_t m_rootdelay;
	s_fixedpt_t m_rootdisp;
	uint32_t    m_refid;
	l_fixedpt_t m_reftime;
	l_fixedpt_t m_orgtime;
	l_fixedpt_t m_rectime;
	l_fixedpt_t m_xmttime;
	uint8_t     m_digest[NTP_DIGESTSIZE];
	len_and_sockaddr *p_lsa;
	uint32_t         lastpkt_refid;
	uint8_t          lastpkt_status;
	uint8_t          lastpkt_stratum;
	uint8_t          reachable_bits;
	/* when to send new query (if p_fd == -1)
	 * or when receive times out (if p_fd >= 0): */
	double           next_action_time;
	/* p_raw_delay is set even by "high delay" packets */
	/* lastpkt_delay isn't */
	double           lastpkt_recv_time;
	double           lastpkt_delay;
	double           lastpkt_rootdelay;
	double           lastpkt_rootdisp;
	/* produced by filter algorithm: */
	double           filter_offset;
	double           filter_dispersion;
	double           filter_jitter;
	datapoint_t      filter_datapoint[NUM_DATAPOINTS];
	/* last sent packet: */
#define USING_KERNEL_PLL_LOOP          1
#define USING_INITIAL_FREQ_ESTIMATION  0

	/* Insert new options above this line. */
	/* Non-compat options: */
	OPT_l = (1 << 7) * ENABLE_FEATURE_NTPD_SERVER,
	OPT_I = (1 << 8) * ENABLE_FEATURE_NTPD_SERVER,
	/* We hijack some bits for other purposes */
	/* total round trip delay to currently selected reference clock */
	double   rootdelay;
	/* reference timestamp: time when the system clock was last set or corrected */
	double   reftime;
	/* total dispersion to currently selected reference clock */
	double   rootdisp;

	double   last_script_run;

#if ENABLE_FEATURE_NTPD_SERVER
	int      listen_fd;
# define G_listen_fd (G.listen_fd)
#else
# define G_listen_fd (-1)
#endif

	/* refid: 32-bit code identifying the particular server or reference clock:
	 * in stratum 0 packets this is a four-character ASCII string,
	 * called the kiss code, used for debugging and monitoring;
	 * in stratum 1 packets this is a four-character ASCII string
	 * assigned to the reference clock by IANA, e.g. "GPS ";
	 * in stratum 2+ packets it's the IPv4 address, or the first 4 bytes
	 * of the MD5 hash of the IPv6 address.
	 */
	uint32_t refid;
	/* precision is defined as the larger of the resolution and time to
	 * read the clock, in log2 units. For instance, the precision of a
	 * mains-frequency clock incrementing at 60 Hz is 16 ms, even when the
	 * system clock hardware representation is to the nanosecond.
	 *
	 * Delays and jitters of various kinds are clamped down to precision.
	 *
	 * If precision_sec is too large, discipline_jitter gets clamped to it
	 * and, if the offset is smaller than discipline_jitter * POLLADJ_GATE,
	 * the poll interval grows even though we really can benefit from staying
	 * at a smaller one, collecting non-lagged datapoints and correcting
	 * the offset.
	 * (Lagged datapoints exist when poll_exp is large but we still have
	 * a systematic offset error - the time distance between datapoints
	 * is significant and older datapoints have smaller offsets.
	 * This makes our offset estimation a bit smaller than reality.)
	 * Due to this effect, setting G_precision_sec close to
	 * STEP_THRESHOLD isn't such a good idea - offsets may grow
	 * too big and we will step. I observed it with -6.
	 *
	 * OTOH, setting precision_sec far too small would result in futile
	 * attempts to synchronize to an unachievable precision.
	 *
	 * -6 is 1/64 sec, -7 is 1/128 sec and so on.
	 * -8 is 1/256 ~= 0.003906 (worked well for me --vda)
	 * -9 is 1/512 ~= 0.001953 (let's try this for some time)
	 */
#define G_precision_exp  -9
	/*
	 * G_precision_exp is used only for constructing outgoing packets.
	 * It's ok to set G_precision_sec to a slightly different value
	 * (one which is "nicer looking" in logs).
	 * The exact value would be (1.0 / (1 << (- G_precision_exp))):
	 */
#define G_precision_sec  0.002
#define STATE_NSET      0       /* initial state, "nothing is set" */
//#define STATE_FSET    1       /* frequency set from file */
//#define STATE_SPIK    2       /* spike detected */
//#define STATE_FREQ    3       /* initial frequency */
#define STATE_SYNC      4       /* clock synchronized (normal operation) */
	uint8_t  discipline_state;      // doc calls it c.state
	uint8_t  poll_exp;              // s.poll
	int      polladj_count;         // c.count
	long     kernel_freq_drift;
	peer_t   *last_update_peer;
	double   last_update_offset;    // c.last
	double   last_update_recv_time; // s.t
	double   discipline_jitter;     // c.jitter
	/* Since we only compare it with ints, we can simplify the code
	 * by not making this variable floating point:
	 */
	unsigned offset_to_jitter_ratio;
	//double cluster_offset;        // s.offset
	//double cluster_jitter;        // s.jitter
#if !USING_KERNEL_PLL_LOOP
	double   discipline_freq_drift; // c.freq
	/* Maybe conditionally calculate wander? it's used only for logging */
	double   discipline_wander;     // c.wander
#endif

#define G (*ptr_to_globals)

#define VERB1 if (MAX_VERBOSE && G.verbose)
#define VERB2 if (MAX_VERBOSE >= 2 && G.verbose >= 2)
#define VERB3 if (MAX_VERBOSE >= 3 && G.verbose >= 3)
#define VERB4 if (MAX_VERBOSE >= 4 && G.verbose >= 4)
#define VERB5 if (MAX_VERBOSE >= 5 && G.verbose >= 5)
#define VERB6 if (MAX_VERBOSE >= 6 && G.verbose >= 6)
static double LOG2D(int a)
{
	if (a < 0)
		return 1.0 / (1UL << -a);
	return 1UL << a;
}
static ALWAYS_INLINE double SQUARE(double x)
{
	return x * x;
}
static ALWAYS_INLINE double MAXD(double a, double b)
{
	return a > b ? a : b;
}
static ALWAYS_INLINE double MIND(double a, double b)
{
	return a < b ? a : b;
}
static NOINLINE double my_SQRT(double X)
{
	union { float f; int32_t i; } v;
	double invsqrt;
	double Xhalf = X * 0.5;

	/* Fast and good approximation to 1/sqrt(X), black magic */
	v.f = X;
	/*v.i = 0x5f3759df - (v.i >> 1);*/
	v.i = 0x5f375a86 - (v.i >> 1); /* - this constant is slightly better */
	invsqrt = v.f; /* better than 0.2% accuracy */

	/* Refining it using Newton's method: x1 = x0 - f(x0)/f'(x0)
	 * f(x) = 1/(x*x) - X  (f == 0 when x = 1/sqrt(X))
	 * f(x)/f'(x) = (X - 1/(x*x)) / (2/(x*x*x)) = X*x*x*x/2 - x/2
	 * x1 = x0 - (X*x0*x0*x0/2 - x0/2) = 1.5*x0 - X*x0*x0*x0/2 = x0*(1.5 - (X/2)*x0*x0)
	 */
	invsqrt = invsqrt * (1.5 - Xhalf * invsqrt * invsqrt); /* ~0.05% accuracy */
	/* invsqrt = invsqrt * (1.5 - Xhalf * invsqrt * invsqrt); 2nd iter: ~0.0001% accuracy */
	/* With 4 iterations, more than half of the results will be exact;
	 * by the 6th iteration the result stabilizes, with about 72% of results exact.
	 * We are well satisfied with 0.05% accuracy.
	 */

	return X * invsqrt; /* X * 1/sqrt(X) ~= sqrt(X) */
}
static ALWAYS_INLINE double SQRT(double X)
{
	/* If this arch doesn't use IEEE 754 floats, fall back to using libm */
	if (sizeof(float) != 4)
		return sqrt(X);

	/* This avoids needing libm, saves about 0.5k on x86-32 */
	return my_SQRT(X);
}
static double gettime1900d(void)
{
	struct timeval tv;
	gettimeofday(&tv, NULL); /* never fails */
	G.cur_time = tv.tv_sec + (1.0e-6 * tv.tv_usec) + OFFSET_1900_1970;
	return G.cur_time;
}
static void d_to_tv(double d, struct timeval *tv)
{
	tv->tv_sec = (long)d;
	tv->tv_usec = (d - tv->tv_sec) * 1000000;
}
static double lfp_to_d(l_fixedpt_t lfp)
{
	double ret;
	lfp.int_partl = ntohl(lfp.int_partl);
	lfp.fractionl = ntohl(lfp.fractionl);
	ret = (double)lfp.int_partl + ((double)lfp.fractionl / UINT_MAX);
	return ret;
}
static double sfp_to_d(s_fixedpt_t sfp)
{
	double ret;
	sfp.int_parts = ntohs(sfp.int_parts);
	sfp.fractions = ntohs(sfp.fractions);
	ret = (double)sfp.int_parts + ((double)sfp.fractions / USHRT_MAX);
	return ret;
}
#if ENABLE_FEATURE_NTPD_SERVER
static l_fixedpt_t d_to_lfp(double d)
{
	l_fixedpt_t lfp;
	lfp.int_partl = (uint32_t)d;
	lfp.fractionl = (uint32_t)((d - lfp.int_partl) * UINT_MAX);
	lfp.int_partl = htonl(lfp.int_partl);
	lfp.fractionl = htonl(lfp.fractionl);
	return lfp;
}
static s_fixedpt_t d_to_sfp(double d)
{
	s_fixedpt_t sfp;
	sfp.int_parts = (uint16_t)d;
	sfp.fractions = (uint16_t)((d - sfp.int_parts) * USHRT_MAX);
	sfp.int_parts = htons(sfp.int_parts);
	sfp.fractions = htons(sfp.fractions);
	return sfp;
}
#endif
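/* Illustrative example (not from the original source): d = 1.5 is encoded
 * by d_to_lfp() as int_partl = 1 and fractionl = (uint32_t)(0.5 * UINT_MAX)
 * ~= 0x7fffffff, both then byte-swapped to network order. lfp_to_d()
 * reverses this, so a round trip loses at most ~2^-32 sec; the 16-bit
 * s_fixedpt_t format used for rootdelay/rootdisp is accurate to ~15 us.
 */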
static double
dispersion(const datapoint_t *dp)
{
	return dp->d_dispersion + FREQ_TOLERANCE * (G.cur_time - dp->d_recv_time);
}

static double
root_distance(peer_t *p)
{
	/* The root synchronization distance is the maximum error due to
	 * all causes of the local clock relative to the primary server.
	 * It is defined as half the total delay plus total dispersion
	 * plus peer jitter.
	 */
	return MAXD(MINDISP, p->lastpkt_rootdelay + p->lastpkt_delay) / 2
		+ p->lastpkt_rootdisp
		+ p->filter_dispersion
		+ FREQ_TOLERANCE * (G.cur_time - p->lastpkt_recv_time)
		+ p->filter_jitter;
}

static void
set_next(peer_t *p, unsigned t)
{
	p->next_action_time = G.cur_time + t;
}
/*
 * Peer clock filter and its helpers
 */
static NOINLINE void
filter_datapoints(peer_t *p)
{
	/* Simulations have shown that use of an *averaged* offset for
	 * p->filter_offset is in fact worse than simply using the last received
	 * one: with large poll intervals (>= 2048) the averaging code uses
	 * offset values which are outdated by hours, and time/frequency
	 * correction goes totally wrong when fed essentially bogus offsets.
	 */
	double minoff, maxoff, w;
	double x = x; /* for compiler */
	double oldest_off = oldest_off;
	double oldest_age = oldest_age;
	double newest_off = newest_off;
	double newest_age = newest_age;
	fdp = p->filter_datapoint;

	minoff = maxoff = fdp[0].d_offset;
	for (i = 1; i < NUM_DATAPOINTS; i++) {
		if (minoff > fdp[i].d_offset)
			minoff = fdp[i].d_offset;
		if (maxoff < fdp[i].d_offset)
			maxoff = fdp[i].d_offset;
	}

	idx = p->datapoint_idx; /* most recent datapoint's index */
	/* Drop two outliers and take a weighted average of the rest:
	 * most_recent/2 + older1/4 + older2/8 ... + older5/32 + older6/32;
	 * we use older6/32, not older6/64, since the sum of weights should be 1:
	 * 1/2 + 1/4 + 1/8 + 1/16 + 1/32 + 1/32 = 1
	 *
	 * filter_dispersion = sum of dispersion_i / 2^(i+1), i = 0..n-1
	 */
	for (i = 0; i < NUM_DATAPOINTS; i++) {
		bb_error_msg("datapoint[%d]: off:%f disp:%f(%f) age:%f%s",
			i,
			fdp[idx].d_offset,
			fdp[idx].d_dispersion, dispersion(&fdp[idx]),
			G.cur_time - fdp[idx].d_recv_time,
			(minoff == fdp[idx].d_offset || maxoff == fdp[idx].d_offset)
				? " (outlier by offset)" : ""
		);

		sum += dispersion(&fdp[idx]) / (2 << i);

		if (minoff == fdp[idx].d_offset) {
			minoff -= 1; /* so that we don't match it ever again */
		} else
		if (maxoff == fdp[idx].d_offset) {
			maxoff += 1;
		} else {
			oldest_off = fdp[idx].d_offset;
			oldest_age = G.cur_time - fdp[idx].d_recv_time;
			newest_off = oldest_off;
			newest_age = oldest_age;
		}

		idx = (idx - 1) & (NUM_DATAPOINTS - 1);
	}
	p->filter_dispersion = sum;
	wavg += x; /* add another older6/64 to form older6/32 */
	/* Fix systematic underestimation with large poll intervals.
	 * Imagine that we still have a bit of uncorrected drift,
	 * and the poll interval is big (say, 100 sec). Offsets form a progression:
	 * 0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 - 0.7 is the most recent.
	 * The algorithm above drops 0.0 and 0.7 as outliers,
	 * and then we have this estimation, ~25% off from 0.7:
	 * 0.1/32 + 0.2/32 + 0.3/16 + 0.4/8 + 0.5/4 + 0.6/2 = 0.503125
	 */
	x = oldest_age - newest_age;
	if (x != 0) {
		x = newest_age / x; /* in the above example, 100 / (600 - 100) */
		if (x < 1) { /* paranoia check */
			x = (newest_off - oldest_off) * x; /* 0.5 * 100/500 = 0.1 */
			wavg += x;
		}
	}
	p->filter_offset = wavg;
	fdp = p->filter_datapoint;
	idx = p->datapoint_idx; /* most recent datapoint's index */

	/* filter_offset: simply use the most recent value */
	p->filter_offset = fdp[idx].d_offset;

	/*
	 * filter_dispersion = sum of dispersion_i / 2^(i+1), i = 0..n-1
	 */
	for (i = 0; i < NUM_DATAPOINTS; i++) {
		sum += dispersion(&fdp[idx]) / (2 << i);
		wavg += fdp[idx].d_offset;
		idx = (idx - 1) & (NUM_DATAPOINTS - 1);
	}
	wavg /= NUM_DATAPOINTS;
	p->filter_dispersion = sum;

	/*
	 * filter_jitter = ( (1/n) * sum of (avg - offset_j)^2 ) ^ 1/2
	 *
	 * where n is the number of valid datapoints in the filter (n > 1);
	 * if filter_jitter < precision then filter_jitter = precision
	 */
	for (i = 0; i < NUM_DATAPOINTS; i++) {
		sum += SQUARE(wavg - fdp[i].d_offset);
	}
	sum = SQRT(sum / NUM_DATAPOINTS);
	p->filter_jitter = sum > G_precision_sec ? sum : G_precision_sec;

	VERB4 bb_error_msg("filter offset:%+f disp:%f jitter:%f",
			p->filter_offset,
			p->filter_dispersion,
			p->filter_jitter);
static void
reset_peer_stats(peer_t *p, double offset)
{
	int i;
	bool small_ofs = fabs(offset) < STEP_THRESHOLD;

	/* Used to set p->filter_datapoint[i].d_dispersion = MAXDISP
	 * and clear reachable bits, but this proved to be too aggressive:
	 * after a step (tested with suspending the laptop for ~30 secs),
	 * this caused all previous data to be considered invalid,
	 * making us need to collect the full ~8 datapoints per peer
	 * after the step in order to start trusting them.
	 * In turn, this was making the poll interval decrease even after
	 * the step was done. (The poll interval decreases already before the
	 * step in this scenario, because we see large offsets and end up with
	 * no good peer to select).
	 */

	for (i = 0; i < NUM_DATAPOINTS; i++) {
		if (small_ofs) {
			p->filter_datapoint[i].d_recv_time += offset;
			if (p->filter_datapoint[i].d_offset != 0) {
				p->filter_datapoint[i].d_offset -= offset;
				//bb_error_msg("p->filter_datapoint[%d].d_offset %f -> %f",
				//	i,
				//	p->filter_datapoint[i].d_offset + offset,
				//	p->filter_datapoint[i].d_offset);
			}
		} else {
			p->filter_datapoint[i].d_recv_time = G.cur_time;
			p->filter_datapoint[i].d_offset = 0;
			/*p->filter_datapoint[i].d_dispersion = MAXDISP;*/
		}
	}
	if (small_ofs) {
		p->lastpkt_recv_time += offset;
	} else {
		/*p->reachable_bits = 0;*/
		p->lastpkt_recv_time = G.cur_time;
	}
	filter_datapoints(p); /* recalc p->filter_xxx */
	VERB6 bb_error_msg("%s->lastpkt_recv_time=%f", p->p_dotted, p->lastpkt_recv_time);
}
static len_and_sockaddr*
resolve_peer_hostname(peer_t *p)
{
	len_and_sockaddr *lsa = host2sockaddr(p->p_hostname, 123);
	if (lsa) {
		p->p_lsa = lsa;
		p->p_dotted = xmalloc_sockaddr2dotted_noport(&lsa->u.sa);
	} else {
		/* error message is emitted by host2sockaddr() */
		set_next(p, HOSTNAME_INTERVAL);
	}
	return lsa;
}

static void
add_peers(const char *s)
{
	llist_t *item;
	peer_t *p;

	p = xzalloc(sizeof(*p) + strlen(s));
	strcpy(p->p_hostname, s);
	p->p_fd = -1;
	p->p_xmt_msg.m_status = MODE_CLIENT | (NTP_VERSION << 3);
	p->next_action_time = G.cur_time; /* = set_next(p, 0); */
	reset_peer_stats(p, STEP_THRESHOLD);

	/* Names like N.<country2chars>.pool.ntp.org are randomly resolved
	 * to a pool of machines. Sometimes different N's resolve to the same IP.
	 * It is not useful to have two peers with the same IP. We skip duplicates.
	 */
	if (resolve_peer_hostname(p)) {
		for (item = G.ntp_peers; item != NULL; item = item->link) {
			peer_t *pp = (peer_t *) item->data;
			if (pp->p_dotted && strcmp(p->p_dotted, pp->p_dotted) == 0) {
				bb_error_msg("duplicate peer %s (%s)", s, p->p_dotted);
				free(p);
				return;
			}
		}
	}

	llist_add_to(&G.ntp_peers, p);
	G.peer_cnt++;
}
static void
do_sendto(int fd,
		const struct sockaddr *from, const struct sockaddr *to, socklen_t addrlen,
		msg_t *msg, ssize_t len)
{
	ssize_t ret;

	errno = 0;
	if (!from) {
		ret = sendto(fd, msg, len, MSG_DONTWAIT, to, addrlen);
	} else {
		ret = send_to_from(fd, msg, len, MSG_DONTWAIT, to, from, addrlen);
	}
	if (ret != len) {
		bb_perror_msg("send failed");
	}
}
static void
send_query_to_peer(peer_t *p)
{
	if (!resolve_peer_hostname(p))
		return;

	/* Why do we need to bind()?
	 * See what happens when we don't bind:
	 *
	 * socket(PF_INET, SOCK_DGRAM, IPPROTO_IP) = 3
	 * setsockopt(3, SOL_IP, IP_TOS, [16], 4) = 0
	 * gettimeofday({1259071266, 327885}, NULL) = 0
	 * sendto(3, "xxx", 48, MSG_DONTWAIT, {sa_family=AF_INET, sin_port=htons(123), sin_addr=inet_addr("10.34.32.125")}, 16) = 48
	 * ^^^ we sent it from some source port picked by kernel.
	 * time(NULL) = 1259071266
	 * write(2, "ntpd: entering poll 15 secs\n", 28) = 28
	 * poll([{fd=3, events=POLLIN}], 1, 15000) = 1 ([{fd=3, revents=POLLIN}])
	 * recv(3, "yyy", 68, MSG_DONTWAIT) = 48
	 * ^^^ this recv will receive packets to any local port!
	 *
	 * Uncomment this and use strace to see it in action:
	 */
#define PROBE_LOCAL_ADDR /* { len_and_sockaddr lsa; lsa.len = LSA_SIZEOF_SA; getsockname(p->query.fd, &lsa.u.sa, &lsa.len); } */

	if (p->p_fd == -1) {
		int fd, family;
		len_and_sockaddr *local_lsa;

		family = p->p_lsa->u.sa.sa_family;
		p->p_fd = fd = xsocket_type(&local_lsa, family, SOCK_DGRAM);
		/* local_lsa has "null" address and port 0 now.
		 * bind() ensures we have a *particular port* selected by kernel
		 * and remembered in p->p_fd, thus later recv(p->p_fd)
		 * receives only packets sent to this port.
		 */
		xbind(fd, &local_lsa->u.sa, local_lsa->len);
#if ENABLE_FEATURE_IPV6
		if (family == AF_INET)
#endif
			setsockopt_int(fd, IPPROTO_IP, IP_TOS, IPTOS_LOWDELAY);
		free(local_lsa);
	}

	/* Emit the message _before_ the attempted send. Think of a very
	 * short-roundtrip network: we need to go back to the recv loop ASAP,
	 * to reduce delay. Printing messages after the send works against that.
	 */
	VERB1 bb_error_msg("sending query to %s", p->p_dotted);

	/*
	 * Send out a random 64-bit number as our transmit time. The NTP
	 * server will copy said number into the originate field on the
	 * response that it sends us. This is totally legal per the SNTP spec.
	 *
	 * The impact of this is twofold: we no longer send out the current
	 * system time for the world to see (which may aid an attacker), and
	 * it gives us a (not very secure) way of knowing that we're not
	 * getting spoofed by an attacker that can't capture our traffic
	 * but can spoof packets from the NTP server we're communicating with.
	 *
	 * Save the real transmit timestamp locally.
	 */
	p->p_xmt_msg.m_xmttime.int_partl = rand();
	p->p_xmt_msg.m_xmttime.fractionl = rand();
	p->p_xmttime = gettime1900d();

	/* We were doing this only if sendto worked, but
	 * loss-of-sync detection needs reachable_bits updated
	 * even if sending fails *locally*:
	 * "network is unreachable" because the cable was pulled?
	 * We still need to declare "unsync" if this condition persists.
	 */
	p->reachable_bits <<= 1;

	if (do_sendto(p->p_fd, /*from:*/ NULL, /*to:*/ &p->p_lsa->u.sa, /*addrlen:*/ p->p_lsa->len,
			&p->p_xmt_msg, NTP_MSGSIZE_NOAUTH) == -1
	) {
		close(p->p_fd);
		p->p_fd = -1;
		/*
		 * We know that we sent nothing.
		 * We can retry *soon* without fearing
		 * that we are flooding the peer.
		 */
		set_next(p, RETRY_INTERVAL);
		return;
	}

	set_next(p, RESPONSE_INTERVAL);
}
/* Note that there is no provision to prevent several run_script invocations
 * from being started in quick succession. In fact, it happens rather often
 * if the initial synchronization results in a step.
 * You will see "step" and then "stratum" script runs, sometimes
 * as close as only 0.002 seconds apart.
 * The script should be ready to deal with this.
 */
static void run_script(const char *action, double offset)
{
	char *argv[3];
	char *env1, *env2, *env3, *env4;

	G.last_script_run = G.cur_time;

	if (!G.script_name)
		return;

	argv[0] = (char*) G.script_name;
	argv[1] = (char*) action;
	argv[2] = NULL;

	VERB1 bb_error_msg("executing '%s %s'", G.script_name, action);

	env1 = xasprintf("%s=%u", "stratum", G.stratum);
	putenv(env1);
	env2 = xasprintf("%s=%ld", "freq_drift_ppm", G.kernel_freq_drift);
	putenv(env2);
	env3 = xasprintf("%s=%u", "poll_interval", 1 << G.poll_exp);
	putenv(env3);
	env4 = xasprintf("%s=%f", "offset", offset);
	putenv(env4);

	/* Other items of potential interest: selected peer,
	 * rootdelay, reftime, rootdisp, refid, ntp_status,
	 * last_update_offset, last_update_recv_time, discipline_jitter,
	 * how many peers have reachable_bits = 0?
	 */

	/* Don't want to wait: it may run hwclock --systohc, and that
	 * may take some time (seconds): */
	/*spawn_and_wait(argv);*/
	spawn(argv);

	unsetenv("stratum");
	unsetenv("freq_drift_ppm");
	unsetenv("poll_interval");
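/* Sketch of a -S handler script (illustrative, not part of the original
 * source; the variable names match the env1..env4 exports above):
 *
 *   #!/bin/sh
 *   # $1 is the action name, e.g. "step" or "stratum"
 *   logger "ntpd $1: offset=$offset stratum=$stratum poll=$poll_interval drift=$freq_drift_ppm"
 *   [ "$1" = step ] && hwclock --systohc  # persist the stepped time to the RTC
 */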
static NOINLINE void
step_time(double offset)
{
	llist_t *item;
	double dtime;
	struct timeval tvc, tvn;
	char buf[sizeof("yyyy-mm-dd hh:mm:ss") + /*paranoia:*/ 4];
	time_t tval;

	gettimeofday(&tvc, NULL); /* never fails */
	dtime = tvc.tv_sec + (1.0e-6 * tvc.tv_usec) + offset;
	d_to_tv(dtime, &tvn);
	if (settimeofday(&tvn, NULL) == -1)
		bb_perror_msg_and_die("settimeofday");

	VERB2 {
		tval = tvc.tv_sec;
		strftime_YYYYMMDDHHMMSS(buf, sizeof(buf), &tval);
		bb_error_msg("current time is %s.%06u", buf, (unsigned)tvc.tv_usec);
	}
	tval = tvn.tv_sec;
	strftime_YYYYMMDDHHMMSS(buf, sizeof(buf), &tval);
	bb_error_msg("setting time to %s.%06u (offset %+fs)", buf, (unsigned)tvn.tv_usec, offset);

	/* Correct various fields which contain time-relative values: */

	/* Globals: */
	G.cur_time += offset;
	G.last_update_recv_time += offset;
	G.last_script_run += offset;

	/* p->lastpkt_recv_time, p->next_action_time and such: */
	for (item = G.ntp_peers; item != NULL; item = item->link) {
		peer_t *pp = (peer_t *) item->data;
		reset_peer_stats(pp, offset);
		//bb_error_msg("offset:%+f pp->next_action_time:%f -> %f",
		//	offset, pp->next_action_time, pp->next_action_time + offset);
		pp->next_action_time += offset;
		if (pp->p_fd >= 0) {
			/* We wait for a reply from this peer too.
			 * But due to the step we are doing, the reply's data is no longer
			 * useful (in fact, it'll be bogus). Stop waiting for it.
			 */
			close(pp->p_fd);
			pp->p_fd = -1;
			set_next(pp, RETRY_INTERVAL);
		}
	}
}
static void clamp_pollexp_and_set_MAXSTRAT(void)
{
	if (G.poll_exp < MINPOLL)
		G.poll_exp = MINPOLL;
	if (G.poll_exp > BIGPOLL)
		G.poll_exp = BIGPOLL;
	G.polladj_count = 0;
	G.stratum = MAXSTRAT;
}
/*
 * Selection and clustering, and their helpers
 */
typedef struct {
	peer_t *p;
	int    type;
	double edge;
	double opt_rd; /* optimization */
} point_t;
static int
compare_point_edge(const void *aa, const void *bb)
{
	const point_t *a = aa;
	const point_t *b = bb;
	if (a->edge < b->edge) {
		return -1;
	}
	return (a->edge > b->edge);
}
typedef struct {
	peer_t *p;
	double metric;
} survivor_t;
static int
compare_survivor_metric(const void *aa, const void *bb)
{
	const survivor_t *a = aa;
	const survivor_t *b = bb;
	if (a->metric < b->metric) {
		return -1;
	}
	return (a->metric > b->metric);
}
static int
fit(peer_t *p, double rd)
{
	if ((p->reachable_bits & (p->reachable_bits-1)) == 0) {
		/* One or zero bits in reachable_bits */
		VERB4 bb_error_msg("peer %s unfit for selection: unreachable", p->p_dotted);
		return 0;
	}
#if 0 /* we filter out such packets earlier */
	if ((p->lastpkt_status & LI_ALARM) == LI_ALARM
	 || p->lastpkt_stratum >= MAXSTRAT
	) {
		VERB4 bb_error_msg("peer %s unfit for selection: bad status/stratum", p->p_dotted);
		return 0;
	}
#endif
	/* rd is root_distance(p) */
	if (rd > MAXDIST + FREQ_TOLERANCE * (1 << G.poll_exp)) {
		VERB4 bb_error_msg("peer %s unfit for selection: root distance too high", p->p_dotted);
		return 0;
	}
//	/* Do we have a loop? */
//	if (p->refid == p->dstaddr || p->refid == s.refid)
//		return 0;
	return 1;
}
static peer_t*
select_and_cluster(void)
{
	peer_t     *p;
	llist_t    *item;
	int        i, j;
	int        size = 3 * G.peer_cnt;
	/* for selection algorithm */
	point_t    point[size];
	unsigned   num_points, num_candidates;
	double     low, high;
	unsigned   num_falsetickers;
	/* for cluster algorithm */
	survivor_t survivor[size];
	unsigned   num_survivors;

	/* Selection */

	num_points = 0;
	item = G.ntp_peers;
	while (item != NULL) {
		double rd, offset;

		p = (peer_t *) item->data;
		rd = root_distance(p);
		offset = p->filter_offset;
		if (!fit(p, rd)) {
			item = item->link;
			continue;
		}

		VERB5 bb_error_msg("interval: [%f %f %f] %s",
				offset - rd,
				offset,
				offset + rd,
				p->p_dotted
		);
		point[num_points].p = p;
		point[num_points].type = -1;
		point[num_points].edge = offset - rd;
		point[num_points].opt_rd = rd;
		num_points++;
		point[num_points].p = p;
		point[num_points].type = 0;
		point[num_points].edge = offset;
		point[num_points].opt_rd = rd;
		num_points++;
		point[num_points].p = p;
		point[num_points].type = 1;
		point[num_points].edge = offset + rd;
		point[num_points].opt_rd = rd;
		num_points++;
		item = item->link;
	}
	num_candidates = num_points / 3;
	if (num_candidates == 0) {
		VERB3 bb_error_msg("no valid datapoints%s", ", no peer selected");
		return NULL;
	}
//TODO: sorting does not seem to be done in reference code
	qsort(point, num_points, sizeof(point[0]), compare_point_edge);

	/* Start with the assumption that there are no falsetickers.
	 * Attempt to find a nonempty intersection interval containing
	 * the midpoints of all truechimers.
	 * If a nonempty interval cannot be found, increase the number
	 * of assumed falsetickers by one and try again.
	 * If a nonempty interval is found and the number of falsetickers
	 * is less than the number of truechimers, a majority has been found
	 * and the midpoint of each truechimer represents
	 * the candidates available to the cluster algorithm.
	 */
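	/* Illustration (hypothetical numbers, not from the original source):
	 * three peers with [offset-rd, offset+rd] intervals [1,3], [2,4], [9,11].
	 * With 0 assumed falsetickers no point is covered by all three
	 * intervals, so the passes below fail; with 1 assumed falseticker
	 * they find low = 2 and high = 3 ([1,3] and [2,4] are truechimers),
	 * and the [9,11] peer is declared a falseticker.
	 */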
	num_falsetickers = 0;
	while (1) {
		int c;
		unsigned num_midpoints = 0;

		c = 0;
		for (i = 0; i < num_points; i++) {
			/* We want to do:
			 * if (point[i].type == -1) c++;
			 * if (point[i].type == 1) c--;
			 * and it's simpler to do it this way:
			 */
			c -= point[i].type;
			if (c >= num_candidates - num_falsetickers) {
				/* If it was c++ and it got big enough... */
				low = point[i].edge;
				break;
			}
			if (point[i].type == 0)
				num_midpoints++;
		}
		c = 0;
		for (i = num_points-1; i >= 0; i--) {
			c += point[i].type;
			if (c >= num_candidates - num_falsetickers) {
				high = point[i].edge;
				break;
			}
			if (point[i].type == 0)
				num_midpoints++;
		}
		/* If the number of midpoints is greater than the number
		 * of allowed falsetickers, the intersection contains at
		 * least one truechimer with no midpoint - bad.
		 * Also, the interval should be nonempty.
		 */
		if (num_midpoints <= num_falsetickers && low < high)
			break;
		num_falsetickers++;
		if (num_falsetickers * 2 >= num_candidates) {
			VERB3 bb_error_msg("falsetickers:%d, candidates:%d%s",
					num_falsetickers, num_candidates,
					", no peer selected");
			return NULL;
		}
	}
	VERB4 bb_error_msg("selected interval: [%f, %f]; candidates:%d falsetickers:%d",
			low, high, num_candidates, num_falsetickers);

	/* Clustering */

	/* Construct a list of survivors (p, metric)
	 * from the chime list, where metric is dominated
	 * first by stratum and then by root distance.
	 * All other things being equal, this is the order of preference.
	 */
	num_survivors = 0;
	for (i = 0; i < num_points; i++) {
		if (point[i].edge < low || point[i].edge > high)
			continue;
		p = point[i].p;
		survivor[num_survivors].p = p;
		/* x.opt_rd == root_distance(p); */
		survivor[num_survivors].metric = MAXDIST * p->lastpkt_stratum + point[i].opt_rd;
		VERB5 bb_error_msg("survivor[%d] metric:%f peer:%s",
				num_survivors, survivor[num_survivors].metric, p->p_dotted);
		num_survivors++;
	}
	/* There must be at least MIN_SELECTED survivors to satisfy the
	 * correctness assertions. Ordinarily, the Byzantine criteria
	 * require four survivors, but for the demonstration here, one
	 * is acceptable.
	 */
	if (num_survivors < MIN_SELECTED) {
		VERB3 bb_error_msg("survivors:%d%s",
				num_survivors,
				", no peer selected");
		return NULL;
	}

//looks like this is ONLY used by the fact that later we pick survivor[0].
//we can avoid sorting then, just find the minimum once!
	qsort(survivor, num_survivors, sizeof(survivor[0]), compare_survivor_metric);
	/* For each association p in turn, calculate the selection
	 * jitter p->sjitter as the square root of the sum of squares
	 * (p->offset - q->offset) over all q associations. The idea is
	 * to repeatedly discard the survivor with maximum selection
	 * jitter until a termination condition is met.
	 */
	while (1) {
		unsigned max_idx = max_idx;
		double max_selection_jitter = max_selection_jitter;
		double min_jitter = min_jitter;

		if (num_survivors <= MIN_CLUSTERED) {
			VERB4 bb_error_msg("num_survivors %d <= %d, not discarding more",
					num_survivors, MIN_CLUSTERED);
			break;
		}

		/* To make sure a few survivors are left
		 * for the clustering algorithm to chew on,
		 * we stop if the number of survivors
		 * is less than or equal to MIN_CLUSTERED (3).
		 */
		for (i = 0; i < num_survivors; i++) {
			double selection_jitter_sq;

			p = survivor[i].p;
			if (i == 0 || p->filter_jitter < min_jitter)
				min_jitter = p->filter_jitter;

			selection_jitter_sq = 0;
			for (j = 0; j < num_survivors; j++) {
				peer_t *q = survivor[j].p;
				selection_jitter_sq += SQUARE(p->filter_offset - q->filter_offset);
			}
			if (i == 0 || selection_jitter_sq > max_selection_jitter) {
				max_selection_jitter = selection_jitter_sq;
				max_idx = i;
			}
			VERB6 bb_error_msg("survivor %d selection_jitter^2:%f",
					i, selection_jitter_sq);
		}
		max_selection_jitter = SQRT(max_selection_jitter / num_survivors);
		VERB5 bb_error_msg("max_selection_jitter (at %d):%f min_jitter:%f",
				max_idx, max_selection_jitter, min_jitter);

		/* If the maximum selection jitter is less than the
		 * minimum peer jitter, then tossing out more survivors
		 * will not lower the minimum peer jitter, so we might
		 * as well stop.
		 */
		if (max_selection_jitter < min_jitter) {
			VERB4 bb_error_msg("max_selection_jitter:%f < min_jitter:%f, num_survivors:%d, not discarding more",
					max_selection_jitter, min_jitter, num_survivors);
			break;
		}

		/* Delete survivor[max_idx] from the list
		 * and go around again.
		 */
		VERB6 bb_error_msg("dropping survivor %d", max_idx);
		num_survivors--;
		while (max_idx < num_survivors) {
			survivor[max_idx] = survivor[max_idx + 1];
			max_idx++;
		}
	}
	/* Combine the offsets of the clustering algorithm survivors
	 * using a weighted average with weight determined by the root
	 * distance. Compute the selection jitter as the weighted RMS
	 * difference between the first survivor and the remaining
	 * survivors. In some cases the inherent clock jitter can be
	 * reduced by not using this algorithm, especially when frequent
	 * clockhopping is involved. bbox: thus we don't do it.
	 */
	for (i = 0; i < num_survivors; i++) {
		p = survivor[i].p;
		x = root_distance(p);
		y += 1 / x;
		z += p->filter_offset / x;
		w += SQUARE(p->filter_offset - survivor[0].p->filter_offset) / x;
	}
	//G.cluster_offset = z / y;
	//G.cluster_jitter = SQRT(w / y);

	/* Pick the best clock. If the old system peer is on the list
	 * and at the same stratum as the first survivor on the list,
	 * then don't do a clock hop. Otherwise, select the first
	 * survivor on the list as the new system peer.
	 */
	p = survivor[0].p;
	if (G.last_update_peer
	 && G.last_update_peer->lastpkt_stratum <= p->lastpkt_stratum
	) {
		/* Starting from 1 is ok here */
		for (i = 1; i < num_survivors; i++) {
			if (G.last_update_peer == survivor[i].p) {
				VERB5 bb_error_msg("keeping old synced peer");
				p = G.last_update_peer;
				goto keep_old;
			}
		}
	}
	G.last_update_peer = p;
 keep_old:
	VERB4 bb_error_msg("selected peer %s filter_offset:%+f age:%f",
			p->p_dotted,
			p->filter_offset,
			G.cur_time - p->lastpkt_recv_time
	);
	return p;
}
/*
 * Local clock discipline and its helpers
 */
static void
set_new_values(int disc_state, double offset, double recv_time)
{
	/* Enter new state and set state variables. Note we use the time
	 * of the last clock filter sample, which must be earlier than
	 * the current time.
	 */
	VERB4 bb_error_msg("disc_state=%d last update offset=%f recv_time=%f",
			disc_state, offset, recv_time);
	G.discipline_state = disc_state;
	G.last_update_offset = offset;
	G.last_update_recv_time = recv_time;
}
/* Return: -1: decrease poll interval, 0: leave as is, 1: increase */
static int
update_local_clock(peer_t *p)
{
	int rc;
	struct timex tmx;
	/* Note: can use G.cluster_offset instead: */
	double offset = p->filter_offset;
	double recv_time = p->lastpkt_recv_time;
	double abs_offset;
#if !USING_KERNEL_PLL_LOOP
	double freq_drift;
#endif
#if !USING_KERNEL_PLL_LOOP || USING_INITIAL_FREQ_ESTIMATION
	double since_last_update;
#endif
	double etemp, dtemp;

	abs_offset = fabs(offset);

#if 0
	/* If needed, the -S script can do it by looking at the $offset
	 * env var and killing the parent */
	/* If the offset is too large, give up and go home */
	if (abs_offset > PANIC_THRESHOLD) {
		bb_error_msg_and_die("offset %f far too big, exiting", offset);
	}
#endif

	/* If this is an old update, for instance as the result
	 * of a system peer change, avoid it. We never use
	 * an old sample or the same sample twice.
	 */
	if (recv_time <= G.last_update_recv_time) {
		VERB3 bb_error_msg("update from %s: same or older datapoint, not using it",
			p->p_dotted);
		return 0; /* "leave poll interval as is" */
	}

	/* Clock state machine transition function. This is where the
	 * action is and defines how the system reacts to large time
	 * and frequency errors.
	 */
#if !USING_KERNEL_PLL_LOOP || USING_INITIAL_FREQ_ESTIMATION
	since_last_update = recv_time - G.reftime;
#endif
#if !USING_KERNEL_PLL_LOOP
	freq_drift = 0;
#endif

#if USING_INITIAL_FREQ_ESTIMATION
	if (G.discipline_state == STATE_FREQ) {
		/* Ignore updates until the stepout threshold */
		if (since_last_update < WATCH_THRESHOLD) {
			VERB4 bb_error_msg("measuring drift, datapoint ignored, %f sec remains",
					WATCH_THRESHOLD - since_last_update);
			return 0; /* "leave poll interval as is" */
		}
# if !USING_KERNEL_PLL_LOOP
		freq_drift = (offset - G.last_update_offset) / since_last_update;
# endif
	}
#endif
	/* There are two main regimes: when the
	 * offset exceeds the step threshold and when it does not.
	 */
	if (abs_offset > STEP_THRESHOLD) {
#if 0
		double remains;

// This "spike state" seems to be useless, peer selection already drops
// occasional "bad" datapoints. If we are here, there were _many_
// large offsets. When a few first large offsets are seen,
// we end up in "no valid datapoints, no peer selected" state.
// Only when enough of them are seen (which means it's not a fluke),
// we end up here. Looks like _our_ clock is off.
		switch (G.discipline_state) {
		case STATE_SYNC:
			/* The first outlier: ignore it, switch to SPIK state */
			VERB3 bb_error_msg("update from %s: offset:%+f, spike%s",
				p->p_dotted, offset,
				"");
			G.discipline_state = STATE_SPIK;
			return -1; /* "decrease poll interval" */

		case STATE_SPIK:
			/* Ignore succeeding outliers until either an inlier
			 * is found or the stepout threshold is exceeded.
			 */
			remains = WATCH_THRESHOLD - since_last_update;
			if (remains > 0) {
				VERB3 bb_error_msg("update from %s: offset:%+f, spike%s",
					p->p_dotted, offset,
					", datapoint ignored");
				return -1; /* "decrease poll interval" */
			}
			/* fall through: we need to step */
		} /* switch */
#endif

		/* Step the time and clamp down the poll interval.
		 *
		 * In NSET state an initial frequency correction is
		 * not available, usually because the frequency file has
		 * not yet been written. Since the time is outside the
		 * capture range, the clock is stepped. The frequency
		 * will be set directly following the stepout interval.
		 *
		 * In FSET state the initial frequency has been set
		 * from the frequency file. Since the time is outside
		 * the capture range, the clock is stepped immediately,
		 * rather than after the stepout interval. Guys get
		 * nervous if it takes 17 minutes to set the clock for
		 * the first time.
		 *
		 * In SPIK state the stepout threshold has expired and
		 * the phase is still above the step threshold. Note
		 * that a single spike greater than the step threshold
		 * is always suppressed, even at the longer poll
		 * intervals.
		 */
		VERB4 bb_error_msg("stepping time by %+f; poll_exp=MINPOLL", offset);
		step_time(offset);
		if (option_mask32 & OPT_q) {
			/* We were only asked to set time once. Done. */
			exit(0);
		}

		clamp_pollexp_and_set_MAXSTRAT();

		run_script("step", offset);

		recv_time += offset;

#if USING_INITIAL_FREQ_ESTIMATION
		if (G.discipline_state == STATE_NSET) {
			set_new_values(STATE_FREQ, /*offset:*/ 0, recv_time);
			return 1; /* "ok to increase poll interval" */
		}
#endif
		abs_offset = offset = 0;
		set_new_values(STATE_SYNC, offset, recv_time);
	} else { /* abs_offset <= STEP_THRESHOLD */

		/* The ratio is calculated before jitter is updated to make
		 * the poll adjust code more sensitive to large offsets.
		 */
		G.offset_to_jitter_ratio = abs_offset / G.discipline_jitter;

		/* Compute the clock jitter as the RMS of exponentially
		 * weighted offset differences. Used by the poll adjust code.
		 */
		etemp = SQUARE(G.discipline_jitter);
		dtemp = SQUARE(offset - G.last_update_offset);
		G.discipline_jitter = SQRT(etemp + (dtemp - etemp) / AVG);
		if (G.discipline_jitter < G_precision_sec)
			G.discipline_jitter = G_precision_sec;

		switch (G.discipline_state) {
		case STATE_NSET:
			if (option_mask32 & OPT_q) {
				/* We were only asked to set time once.
				 * The clock is precise enough, no need to step.
				 */
				exit(0);
			}
#if USING_INITIAL_FREQ_ESTIMATION
			/* This is the first update received and the frequency
			 * has not been initialized. The first thing to do
			 * is directly measure the oscillator frequency.
			 */
			set_new_values(STATE_FREQ, offset, recv_time);
#else
			set_new_values(STATE_SYNC, offset, recv_time);
#endif
			VERB4 bb_error_msg("transitioning to FREQ, datapoint ignored");
			return 0; /* "leave poll interval as is" */

#if 0 /* this is dead code for now */
		case STATE_FSET:
			/* This is the first update and the frequency
			 * has been initialized. Adjust the phase, but
			 * don't adjust the frequency until the next update.
			 */
			set_new_values(STATE_SYNC, offset, recv_time);
			/* freq_drift remains 0 */
			break;
#endif

#if USING_INITIAL_FREQ_ESTIMATION
		case STATE_FREQ:
			/* since_last_update >= WATCH_THRESHOLD, we waited enough.
			 * Correct the phase and frequency and switch to SYNC state.
			 * freq_drift was already estimated (see code above)
			 */
			set_new_values(STATE_SYNC, offset, recv_time);
			break;
#endif
		default:
#if !USING_KERNEL_PLL_LOOP
			/* Compute freq_drift due to PLL and FLL contributions.
			 *
			 * The FLL and PLL frequency gain constants
			 * depend on the poll interval and Allan
			 * intercept. The FLL is not used below one-half
			 * the Allan intercept. Above that the loop gain
			 * increases in steps to 1 / AVG.
			 */
			if ((1 << G.poll_exp) > ALLAN / 2) {
				etemp = FLL - G.poll_exp;
				freq_drift += (offset - G.last_update_offset) / (MAXD(since_last_update, ALLAN) * etemp);
			}
			/* For the PLL the integration interval
			 * (numerator) is the minimum of the update
			 * interval and poll interval. This allows
			 * oversampling, but not undersampling.
			 */
			etemp = MIND(since_last_update, (1 << G.poll_exp));
			dtemp = (4 * PLL) << G.poll_exp;
			freq_drift += offset * etemp / SQUARE(dtemp);
#endif
			set_new_values(STATE_SYNC, offset, recv_time);
			break;
		}

		if (G.stratum != p->lastpkt_stratum + 1) {
			G.stratum = p->lastpkt_stratum + 1;
			run_script("stratum", offset);
		}
	}

	G.reftime = G.cur_time;
	G.ntp_status = p->lastpkt_status;
	G.refid = p->lastpkt_refid;
	G.rootdelay = p->lastpkt_rootdelay + p->lastpkt_delay;
	dtemp = p->filter_jitter; // SQRT(SQUARE(p->filter_jitter) + SQUARE(G.cluster_jitter));
	dtemp += MAXD(p->filter_dispersion + FREQ_TOLERANCE * (G.cur_time - p->lastpkt_recv_time) + abs_offset, MINDISP);
	G.rootdisp = p->lastpkt_rootdisp + dtemp;
	VERB4 bb_error_msg("updating leap/refid/reftime/rootdisp from peer %s", p->p_dotted);

	/* We are in STATE_SYNC now, but did not do adjtimex yet.
	 * (Any other state does not reach this, they all return earlier.)
	 * By this time, freq_drift and offset are set
	 * to values suitable for adjtimex.
	 */
#if !USING_KERNEL_PLL_LOOP
	/* Calculate the new frequency drift and frequency stability (wander).
	 * Compute the clock wander as the RMS of exponentially weighted
	 * frequency differences. This is not used directly, but can,
	 * along with the jitter, be a highly useful monitoring and
	 * debugging tool.
	 */
	dtemp = G.discipline_freq_drift + freq_drift;
	G.discipline_freq_drift = MAXD(MIND(MAXDRIFT, dtemp), -MAXDRIFT);
	etemp = SQUARE(G.discipline_wander);
	dtemp = SQUARE(dtemp);
	G.discipline_wander = SQRT(etemp + (dtemp - etemp) / AVG);

	VERB4 bb_error_msg("discipline freq_drift=%.9f(int:%ld corr:%e) wander=%f",
			G.discipline_freq_drift,
			(long)(G.discipline_freq_drift * 65536e6),
			freq_drift,
			G.discipline_wander);
#endif
	memset(&tmx, 0, sizeof(tmx));
	if (adjtimex(&tmx) < 0)
		bb_perror_msg_and_die("adjtimex");
	bb_error_msg("p adjtimex freq:%ld offset:%+ld status:0x%x tc:%ld",
			tmx.freq, tmx.offset, tmx.status, tmx.constant);

	memset(&tmx, 0, sizeof(tmx));
#if 0
//doesn't work, offset remains 0 (!) in kernel:
//ntpd: set adjtimex freq:1786097 tmx.offset:77487
//ntpd: prev adjtimex freq:1786097 tmx.offset:0
//ntpd: cur adjtimex freq:1786097 tmx.offset:0
	tmx.modes = ADJ_FREQUENCY | ADJ_OFFSET;
	/* 65536 is one ppm */
	tmx.freq = G.discipline_freq_drift * 65536e6;
#endif
	tmx.modes = ADJ_OFFSET | ADJ_STATUS | ADJ_TIMECONST;// | ADJ_MAXERROR | ADJ_ESTERROR;
	tmx.constant = (int)G.poll_exp - 4;
	/*
	 * The below if statement should be unnecessary, but...
	 * It looks like the Linux kernel's PLL is far too gentle in changing
	 * tmx.freq in response to clock offset. The offset keeps growing
	 * and eventually we fall back to smaller poll intervals.
	 * We can make the correction more aggressive (about x2) by supplying
	 * a PLL time constant which is one less than the real one.
	 * To be on the safe side, let's do it only if the offset is significantly
	 * larger than the jitter.
	 */
	if (G.offset_to_jitter_ratio >= TIMECONST_HACK_GATE)
		tmx.constant--;

	tmx.offset = (long)(offset * 1000000); /* usec */
	if (SLEW_THRESHOLD < STEP_THRESHOLD) {
		if (tmx.offset > (long)(SLEW_THRESHOLD * 1000000)) {
			tmx.offset = (long)(SLEW_THRESHOLD * 1000000);
		}
		if (tmx.offset < -(long)(SLEW_THRESHOLD * 1000000)) {
			tmx.offset = -(long)(SLEW_THRESHOLD * 1000000);
		}
	}

	if (tmx.constant < 0)
		tmx.constant = 0;

	tmx.status = STA_PLL;
	if (G.ntp_status & LI_PLUSSEC)
		tmx.status |= STA_INS;
	if (G.ntp_status & LI_MINUSSEC)
		tmx.status |= STA_DEL;

	//tmx.esterror = (uint32_t)(clock_jitter * 1e6);
	//tmx.maxerror = (uint32_t)((sys_rootdelay / 2 + sys_rootdisp) * 1e6);
	rc = adjtimex(&tmx);
	if (rc < 0)
		bb_perror_msg_and_die("adjtimex");
	/* NB: here the kernel returns constant == G.poll_exp, not == G.poll_exp - 4.
	 * Not sure why. Perhaps it is normal.
	 */
	VERB4 bb_error_msg("adjtimex:%d freq:%ld offset:%+ld status:0x%x",
				rc, tmx.freq, tmx.offset, tmx.status);
	G.kernel_freq_drift = tmx.freq / 65536;
	VERB2 bb_error_msg("update from:%s offset:%+f delay:%f jitter:%f clock drift:%+.3fppm tc:%d",
			p->p_dotted,
			offset,
			p->lastpkt_delay,
			G.discipline_jitter,
			(double)tmx.freq / 65536,
			(int)tmx.constant
	);

	return 1; /* "ok to increase poll interval" */
}
/*
 * We've got a new reply packet from a peer, process it
 */
static unsigned
poll_interval(int upper_bound)
{
	unsigned interval, r, mask;

	interval = 1 << G.poll_exp;
	if (interval > upper_bound)
		interval = upper_bound;
	mask = ((interval-1) >> 4) | 1;
	r = rand();
	interval += r & mask; /* ~ random(0..1) * interval/16 */
	VERB4 bb_error_msg("chose poll interval:%u (poll_exp:%d)", interval, G.poll_exp);
	return interval;
}
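/* Example (illustrative): with poll_exp = 10, interval = 1024 and
 * mask = ((1024-1) >> 4) | 1 = 63, so 0..63 seconds of random fuzz
 * (~interval/16) are added - this spreads queries to different peers
 * in time instead of firing them all at once.
 */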
static void
adjust_poll(int count)
{
	G.polladj_count += count;
	if (G.polladj_count > POLLADJ_LIMIT) {
		G.polladj_count = 0;
		if (G.poll_exp < MAXPOLL) {
			G.poll_exp++;
			VERB4 bb_error_msg("polladj: discipline_jitter:%f ++poll_exp=%d",
					G.discipline_jitter, G.poll_exp);
		}
	} else if (G.polladj_count < -POLLADJ_LIMIT || (count < 0 && G.poll_exp > BIGPOLL)) {
		G.polladj_count = 0;
		if (G.poll_exp > MINPOLL) {
			llist_t *item;

			G.poll_exp--;
			/* Correct p->next_action_time in each peer
			 * which waits for sending, so that they send earlier.
			 * Old pp->next_action_time values are on the order
			 * of t + (1 << old_poll_exp) + small_random,
			 * we simply need to subtract ~half of that.
			 */
			for (item = G.ntp_peers; item != NULL; item = item->link) {
				peer_t *pp = (peer_t *) item->data;
				if (pp->p_fd < 0)
					pp->next_action_time -= (1 << G.poll_exp);
			}
			VERB4 bb_error_msg("polladj: discipline_jitter:%f --poll_exp=%d",
					G.discipline_jitter, G.poll_exp);
		}
	} else {
		VERB4 bb_error_msg("polladj: count:%d", G.polladj_count);
	}
}
static NOINLINE void
recv_and_process_peer_pkt(peer_t *p)
{
	int         rc;
	ssize_t     size;
	msg_t       msg;
	double      T1, T2, T3, T4;
	double      offset;
	double      prev_delay, delay;
	unsigned    interval;
	datapoint_t *datapoint;
	peer_t      *q;

	offset = 0;

	/* We can recvfrom here and check from.IP, but some multihomed
	 * ntp servers reply from their *other IP*.
	 * TODO: maybe we should check at least what we can: from.port == 123?
	 */
	size = recv(p->p_fd, &msg, sizeof(msg), MSG_DONTWAIT);
	if (size < 0) {
		if (errno == EAGAIN)
			/* There was no packet after all
			 * (poll() returning POLLIN for a fd
			 * is not an ironclad guarantee that data is there)
			 */
			return;
		/*
		 * If you need different handling for a specific
		 * errno, always explain it in a comment.
		 */
		bb_perror_msg_and_die("recv(%s) error", p->p_dotted);
	}

	if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) {
		bb_error_msg("malformed packet received from %s", p->p_dotted);
		return;
	}

	if (msg.m_orgtime.int_partl != p->p_xmt_msg.m_xmttime.int_partl
	 || msg.m_orgtime.fractionl != p->p_xmt_msg.m_xmttime.fractionl
	) {
		/* Somebody else's packet */
		return;
	}

	/* We do not expect any more packets from this peer for now.
	 * Closing the socket informs the kernel about it.
	 * We open a new socket when we send a new query.
	 */
	close(p->p_fd);
	p->p_fd = -1;

	if ((msg.m_status & LI_ALARM) == LI_ALARM
	 || msg.m_stratum == 0
	 || msg.m_stratum > NTP_MAXSTRATUM
	) {
		bb_error_msg("reply from %s: peer is unsynced", p->p_dotted);
		/*
		 * Stratum 0 responses may have commands in the 32-bit m_refid field:
		 * "DENY", "RSTR" - peer does not like us at all,
		 * "RATE" - peer is overloaded, reduce polling freq.
		 * If the poll interval is small, increase it.
		 */
		if (G.poll_exp < BIGPOLL)
			goto increase_interval;
		goto pick_normal_interval;
	}
1894 // return; /* invalid header values */
1897 * From RFC 2030 (with a correction to the delay math):
1899 * Timestamp Name ID When Generated
1900 * ------------------------------------------------------------
1901 * Originate Timestamp T1 time request sent by client
1902 * Receive Timestamp T2 time request received by server
1903 * Transmit Timestamp T3 time reply sent by server
1904 * Destination Timestamp T4 time reply received by client
1906 * The roundtrip delay and local clock offset are defined as
1908 * delay = (T4 - T1) - (T3 - T2); offset = ((T2 - T1) + (T3 - T4)) / 2
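	/* Worked example (illustrative numbers, not from the original source):
	 * T1 = 10.000, T2 = 10.102, T3 = 10.103, T4 = 10.005
	 * (client clock 0.1 sec behind the server, 2 ms network delay each way,
	 * 1 ms server processing time):
	 * delay  = (10.005 - 10.000) - (10.103 - 10.102) = 0.004
	 * offset = ((10.102 - 10.000) + (10.103 - 10.005)) / 2 = +0.100
	 * With symmetric path delays the offset estimate is exact; asymmetry
	 * skews it by at most delay/2.
	 */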
1911 T2 = lfp_to_d(msg.m_rectime);
1912 T3 = lfp_to_d(msg.m_xmttime);
1915 /* The delay calculation is a special case. In cases where the
1916 * server and client clocks are running at different rates and
1917 * with very fast networks, the delay can appear negative. In
1918 * order to avoid violating the Principle of Least Astonishment,
1919 * the delay is clamped not less than the system precision.
1921 delay = (T4 - T1) - (T3 - T2);
1922 if (delay < G_precision_sec)
1923 delay = G_precision_sec;
1925 * If this packet's delay is much bigger than the last one,
1926 * it's better to just ignore it than use its much less precise value.
1928 prev_delay = p->p_raw_delay;
1929 p->p_raw_delay = delay;
1930 if (p->reachable_bits && delay > prev_delay * BAD_DELAY_GROWTH) {
1931 bb_error_msg("reply from %s: delay %f is too high, ignoring", p->p_dotted, delay);
1932 goto pick_normal_interval;
1935 p->lastpkt_delay = delay;
1936 p->lastpkt_recv_time = T4;
1937 VERB6 bb_error_msg("%s->lastpkt_recv_time=%f", p->p_dotted, p->lastpkt_recv_time);
1938 p->lastpkt_status = msg.m_status;
1939 p->lastpkt_stratum = msg.m_stratum;
1940 p->lastpkt_rootdelay = sfp_to_d(msg.m_rootdelay);
1941 p->lastpkt_rootdisp = sfp_to_d(msg.m_rootdisp);
1942 p->lastpkt_refid = msg.m_refid;
1944 p->datapoint_idx = p->reachable_bits ? (p->datapoint_idx + 1) % NUM_DATAPOINTS : 0;
1945 datapoint = &p->filter_datapoint[p->datapoint_idx];
1946 datapoint->d_recv_time = T4;
1947 datapoint->d_offset = offset = ((T2 - T1) + (T3 - T4)) / 2;
1948 datapoint->d_dispersion = LOG2D(msg.m_precision_exp) + G_precision_sec;
1949 if (!p->reachable_bits) {
1950 /* 1st datapoint ever - replicate offset in every element */
1952 for (i = 0; i < NUM_DATAPOINTS; i++) {
1953 p->filter_datapoint[i].d_offset = offset;
1957 p->reachable_bits |= 1;
	if ((MAX_VERBOSE && G.verbose) || (option_mask32 & OPT_w)) {
		bb_error_msg("reply from %s: offset:%+f delay:%f status:0x%02x strat:%d refid:0x%08x rootdelay:%f reach:0x%02x",
			p->p_dotted,
			offset,
			p->lastpkt_delay,
			p->lastpkt_status,
			p->lastpkt_stratum,
			p->lastpkt_refid,
			p->lastpkt_rootdelay,
			p->reachable_bits
			/* not shown: m_ppoll, m_precision_exp, m_rootdisp,
			 * m_reftime, m_orgtime, m_rectime, m_xmttime
			 */
		);
	}
	/* Muck with statistics and update the clock */
	filter_datapoints(p);
	q = select_and_cluster();
	rc = 0;
	if (q) {
		if (!(option_mask32 & OPT_w)) {
			rc = update_local_clock(q);
#if 0
//Disabled this because there is a case where largish offsets
//are unavoidable: if network round-trip delay is, say, ~0.6s,
//error in offset estimation would be ~delay/2 ~= 0.3s.
//Thus, offsets will usually be in the -0.3...0.3s range.
//In this case, this code would keep the poll interval small,
//but it won't be helping.
//The BIGOFF check below deals with the case of seeing multi-second offsets.

			/* If drift is dangerously large, immediately
			 * drop poll interval one step down.
			 */
			if (fabs(q->filter_offset) >= POLLDOWN_OFFSET) {
				VERB4 bb_error_msg("offset:%+f > POLLDOWN_OFFSET", q->filter_offset);
				adjust_poll(-POLLADJ_LIMIT * 3);
				rc = 0;
			}
#endif
		}
	} else {
		/* No peer selected.
		 * If poll interval is small, increase it.
		 */
		if (G.poll_exp < BIGPOLL)
			goto increase_interval;
	}
	if (rc != 0) {
		/* Adjust the poll interval by comparing the current offset
		 * with the clock jitter. If the offset is less than
		 * the clock jitter times a constant, then the averaging interval
		 * is increased, otherwise it is decreased. A bit of hysteresis
		 * helps calm the dance. Works best using burst mode.
		 */
		if (rc > 0 && G.offset_to_jitter_ratio <= POLLADJ_GATE) {
			/* was += G.poll_exp but it is a bit
			 * too optimistic for my taste at high poll_exp's */
 increase_interval:
			adjust_poll(MINPOLL);
		} else {
			VERB3 if (rc > 0)
				bb_error_msg("want smaller interval: offset/jitter = %u",
					G.offset_to_jitter_ratio);
			adjust_poll(-G.poll_exp * 2);
		}
	}
	/* Decide when to send new query for this peer */
 pick_normal_interval:
	interval = poll_interval(INT_MAX);
	if (fabs(offset) >= BIGOFF && interval > BIGOFF_INTERVAL) {
		/* If we are synced, offsets are less than SLEW_THRESHOLD,
		 * or at the very least not much larger than it.
		 * Now we see a largish one.
		 * Either this peer is feeling bad, or the packet got corrupted,
		 * or _our_ clock is wrong now and _all_ peers will show similarly
		 * large offsets too.
		 * I observed this with laptop suspend stopping the clock.
		 * In any case, it makes sense to make the next request soonish:
		 * cases 1 and 2: get a better datapoint,
		 * case 3: allows to resync faster.
		 */
		interval = BIGOFF_INTERVAL;
	}

	set_next(p, interval);
}
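/* Illustrative sketch, not part of the original source: the l_fixedpt_t
 * values converted above by lfp_to_d()/d_to_lfp() are NTP's 64-bit
 * fixed-point timestamps - 32 bits of whole seconds since 1900 and
 * 32 bits of fraction (units of 1/2^32 s). After byte-order conversion
 * the math boils down to this (names here are hypothetical):
 */
#if 0 /* example only */
static double example_lfp_to_d(uint32_t int_part, uint32_t frac_part)
{
	/* fraction is in units of 1/2^32 second */
	return (double)int_part + (double)frac_part / 4294967296.0;
}
static void example_d_to_lfp(double d, uint32_t *int_part, uint32_t *frac_part)
{
	*int_part = (uint32_t)d;
	*frac_part = (uint32_t)((d - (double)*int_part) * 4294967296.0);
}
#endif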
#if ENABLE_FEATURE_NTPD_SERVER
static NOINLINE void
recv_and_process_client_pkt(void /*int fd*/)
{
	ssize_t size;
	len_and_sockaddr *to;
	struct sockaddr *from;
	msg_t msg;
	uint8_t query_status;
	l_fixedpt_t query_xmttime;

	to = get_sock_lsa(G_listen_fd);
	from = xzalloc(to->len);

	size = recv_from_to(G_listen_fd, &msg, sizeof(msg), MSG_DONTWAIT, from, &to->u.sa, to->len);
	if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) {
		char *addr;
		if (size < 0) {
			if (errno == EAGAIN)
				goto bail;
			bb_perror_msg_and_die("recv");
		}
		addr = xmalloc_sockaddr2dotted_noport(from);
		bb_error_msg("malformed packet received from %s: size %u", addr, (int)size);
		free(addr);
		goto bail;
	}
	/* Respond only to client and symmetric active packets */
	if ((msg.m_status & MODE_MASK) != MODE_CLIENT
	 && (msg.m_status & MODE_MASK) != MODE_SYM_ACT
	) {
		goto bail;
	}

	query_status = msg.m_status;
	query_xmttime = msg.m_xmttime;
	/* Build a reply packet */
	memset(&msg, 0, sizeof(msg));
	msg.m_status = G.stratum < MAXSTRAT ? (G.ntp_status & LI_MASK) : LI_ALARM;
	msg.m_status |= (query_status & VERSION_MASK);
	msg.m_status |= ((query_status & MODE_MASK) == MODE_CLIENT) ?
			MODE_SERVER : MODE_SYM_PAS;
	msg.m_stratum = G.stratum;
	msg.m_ppoll = G.poll_exp;
	msg.m_precision_exp = G_precision_exp;
	/* this time was obtained between poll() and recv() */
	msg.m_rectime = d_to_lfp(G.cur_time);
	msg.m_xmttime = d_to_lfp(gettime1900d()); /* this instant */
	if (G.peer_cnt == 0) {
		/* we have no peers: "stratum 1 server" mode. reftime = our own time */
		G.reftime = G.cur_time;
	}
	msg.m_reftime = d_to_lfp(G.reftime);
	msg.m_orgtime = query_xmttime;
	msg.m_rootdelay = d_to_sfp(G.rootdelay);
//simple code does not do this, fix simple code!
	msg.m_rootdisp = d_to_sfp(G.rootdisp);
	//version = (query_status & VERSION_MASK); /* ... >> VERSION_SHIFT - done below instead */
	msg.m_refid = G.refid; // (version > (3 << VERSION_SHIFT)) ? G.refid : G.refid3;
	/* We reply from the local address the packet was sent to,
	 * this makes to/from look swapped here: */
	do_sendto(G_listen_fd,
			/*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len,
			&msg, size);

 bail:
	free(to);
	free(from);
}
#endif
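/* Illustrative sketch, not part of the original source: the m_status
 * byte assembled above packs three NTP header fields - LI (2 bits),
 * version (3 bits) and mode (3 bits). A standalone builder for the
 * same byte could look like this (the function name is hypothetical;
 * the numeric constants mirror the masks used above):
 */
#if 0 /* example only */
static uint8_t example_reply_status(uint8_t query_status, int synced)
{
	uint8_t s = synced ? 0 : (3 << 6);      /* LI: 0 = no warning, 3 = alarm */
	s |= query_status & (7 << 3);           /* echo the client's version bits */
	s |= ((query_status & 7) == 3) ? 4 : 2; /* client(3)->server(4), else sym.passive(2) */
	return s;
}
#endif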
/*
 * Upstream ntpd's options:
 *
 * -4 Force DNS resolution of host names to the IPv4 namespace.
 * -6 Force DNS resolution of host names to the IPv6 namespace.
 * -a Require cryptographic authentication for broadcast client,
 * multicast client and symmetric passive associations.
 * This is the default.
 * -A Do not require cryptographic authentication for broadcast client,
 * multicast client and symmetric passive associations.
 * This is almost never a good idea.
 * -b Enable the client to synchronize to broadcast servers.
 * -c conffile
 * Specify the name and path of the configuration file,
 * default /etc/ntp.conf
 * -d Specify debugging mode. This option may occur more than once,
 * with each occurrence indicating greater detail of display.
 * -D level
 * Specify debugging level directly.
 * -f driftfile
 * Specify the name and path of the frequency file.
 * This is the same operation as the "driftfile FILE"
 * configuration command.
 * -g Normally, ntpd exits with a message to the system log
 * if the offset exceeds the panic threshold, which is 1000 s
 * by default. This option allows the time to be set to any value
 * without restriction; however, this can happen only once.
 * If the threshold is exceeded after that, ntpd will exit
 * with a message to the system log. This option can be used
 * with the -q and -x options. See the tinker command for other options.
 * -i jaildir
 * Chroot the server to the directory jaildir. This option also implies
 * that the server attempts to drop root privileges at startup
 * (otherwise, chroot gives very little additional security).
 * You may need to also specify a -u option.
 * -k keyfile
 * Specify the name and path of the symmetric key file,
 * default /etc/ntp/keys. This is the same operation
 * as the "keys FILE" configuration command.
 * -l logfile
 * Specify the name and path of the log file. The default
 * is the system log file. This is the same operation as
 * the "logfile FILE" configuration command.
 * -L Do not listen to virtual IPs. The default is to listen.
 * -n Do not fork.
 * -N To the extent permitted by the operating system,
 * run the ntpd at the highest priority.
 * -p pidfile
 * Specify the name and path of the file used to record the ntpd
 * process ID. This is the same operation as the "pidfile FILE"
 * configuration command.
 * -P priority
 * To the extent permitted by the operating system,
 * run the ntpd at the specified priority.
 * -q Exit the ntpd just after the first time the clock is set.
 * This behavior mimics that of the ntpdate program, which is
 * to be retired. The -g and -x options can be used with this option.
 * Note: The kernel time discipline is disabled with this option.
 * -r broadcastdelay
 * Specify the default propagation delay from the broadcast/multicast
 * server to this client. This is necessary only if the delay
 * cannot be computed automatically by the protocol.
 * -s statsdir
 * Specify the directory path for files created by the statistics
 * facility. This is the same operation as the "statsdir DIR"
 * configuration command.
 * -t key
 * Add a key number to the trusted key list. This option can occur
 * more than once.
 * -u user[:group]
 * Specify a user, and optionally a group, to switch to.
 * -v variable
 * -V variable
 * Add a system variable listed by default.
 * -x Normally, the time is slewed if the offset is less than the step
 * threshold, which is 128 ms by default, and stepped if above
 * the threshold. This option sets the threshold to 600 s, which is
 * well within the accuracy window to set the clock manually.
 * Note: since the slew rate of typical Unix kernels is limited
 * to 0.5 ms/s, each second of adjustment requires an amortization
 * interval of 2000 s. Thus, an adjustment as much as 600 s
 * will take almost 14 days to complete. This option can be used
 * with the -g and -q options. See the tinker command for other options.
 * Note: The kernel time discipline is disabled with this option.
 */
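/* (Worked check of the -x arithmetic above, added for illustration:
 * at a slew rate of 0.5 ms/s, amortizing 1 s takes 1/0.0005 = 2000 s,
 * so a 600 s adjustment takes 600 * 2000 = 1,200,000 s ~= 13.9 days.)
 */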
/* By doing init in a separate function we decrease stack usage
 * in the main loop.
 */
static NOINLINE void ntp_init(char **argv)
{
	unsigned opts;
	llist_t *peers;

	srand(getpid());

	if (getuid())
		bb_error_msg_and_die(bb_msg_you_must_be_root);

	/* Set some globals */
	G.discipline_jitter = G_precision_sec;
	G.stratum = MAXSTRAT;
	if (BURSTPOLL != 0)
		G.poll_exp = BURSTPOLL; /* speeds up initial sync */
	G.last_script_run = G.reftime = G.last_update_recv_time = gettime1900d(); /* sets G.cur_time too */
	/* Parse options */
	peers = NULL;
	opt_complementary = "dd:wn" /* -d: counter; -p: list; -w implies -n */
		IF_FEATURE_NTPD_SERVER(":Il"); /* -I implies -l */
	opts = getopt32(argv,
			"nqNx" /* compat */
			"wp:*S:"IF_FEATURE_NTPD_SERVER("l") /* NOT compat */
			IF_FEATURE_NTPD_SERVER("I:") /* compat */
			"d" /* compat */
			"46aAbgL", /* compat, ignored */
			&peers, &G.script_name,
#if ENABLE_FEATURE_NTPD_SERVER
			&G.if_name,
#endif
			&G.verbose);

//	if (opts & OPT_x) /* disable stepping, only slew is allowed */
//		G.time_was_stepped = 1;
#if ENABLE_FEATURE_NTPD_SERVER
	G_listen_fd = -1;
	if (opts & OPT_l) {
		G_listen_fd = create_and_bind_dgram_or_die(NULL, 123);
		if (G.if_name) {
			if (setsockopt_bindtodevice(G_listen_fd, G.if_name))
				xfunc_die();
		}
		socket_want_pktinfo(G_listen_fd);
		setsockopt_int(G_listen_fd, IPPROTO_IP, IP_TOS, IPTOS_LOWDELAY);
	}
#endif

	/* I hesitate to set -20 prio. -15 should be high enough for timekeeping */
	if (opts & OPT_N)
		setpriority(PRIO_PROCESS, 0, -15);
	if (!(opts & OPT_n)) {
		bb_daemonize_or_rexec(DAEMON_DEVNULL_STDIO, argv);
		logmode = LOGMODE_NONE;
	}

	if (peers) {
		while (peers)
			add_peers(llist_pop(&peers));
	}
#if ENABLE_FEATURE_NTPD_CONF
	else {
		parser_t *parser;
		char *token[3];

		parser = config_open("/etc/ntp.conf");
		while (config_read(parser, token, 3, 1, "# \t", PARSE_NORMAL)) {
			if (strcmp(token[0], "server") == 0 && token[1]) {
				add_peers(token[1]);
				continue;
			}
			bb_error_msg("skipping %s:%u: unimplemented command '%s'",
				"/etc/ntp.conf", parser->lineno, token[0]
			);
		}
		config_close(parser);
	}
#endif
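	/* Example /etc/ntp.conf accepted by the parser above (host names
	 * here are hypothetical; only "server" lines are implemented,
	 * anything else is skipped with a warning):
	 *
	 *   server 0.pool.ntp.org
	 *   server 1.pool.ntp.org
	 *   server time.example.com
	 */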
	if (G.peer_cnt == 0) {
		if (!(opts & OPT_l))
			bb_show_usage();
		/* -l but no peers: "stratum 1 server" mode */
		G.stratum = 1;
	}
	/* If the network is up, synchronization occurs in ~10 seconds.
	 * We give "ntpd -q" 10 seconds to get the first reply,
	 * then another 50 seconds to finish syncing.
	 *
	 * I tested ntpd 4.2.6p1 and apparently it never exits
	 * (will try forever), but it does not feel right.
	 * The goal of -q is to act like ntpdate: set time
	 * after a reasonably small period of polling, or fail.
	 */
	if (opts & OPT_q) {
		option_mask32 |= OPT_qq;
		alarm(10);
	}

	bb_signals(0
		| (1 << SIGTERM)
		| (1 << SIGINT)
		| (1 << SIGALRM)
		, record_signo
	);
	bb_signals(0
		| (1 << SIGPIPE)
		| (1 << SIGCHLD)
		, SIG_IGN
	);
}
int ntpd_main(int argc UNUSED_PARAM, char **argv) MAIN_EXTERNALLY_VISIBLE;
int ntpd_main(int argc UNUSED_PARAM, char **argv)
{
#undef G
	struct globals G;
	struct pollfd *pfd;
	peer_t **idx2peer;
	unsigned cnt;

	memset(&G, 0, sizeof(G));
	SET_PTR_TO_GLOBALS(&G);

	ntp_init(argv);

	/* If ENABLE_FEATURE_NTPD_SERVER, + 1 for listen_fd: */
	cnt = G.peer_cnt + ENABLE_FEATURE_NTPD_SERVER;
	idx2peer = xzalloc(sizeof(idx2peer[0]) * cnt);
	pfd = xzalloc(sizeof(pfd[0]) * cnt);
	/* Countdown: we never sync before we have sent INITIAL_SAMPLES+1
	 * packets to each peer.
	 * NB: if some peer is not responding, we may end up sending
	 * fewer packets to it and more to other peers.
	 * NB2: sync usually happens using INITIAL_SAMPLES packets,
	 * since the last reply does not come back instantaneously.
	 */
	cnt = G.peer_cnt * (INITIAL_SAMPLES + 1);

	write_pidfile(CONFIG_PID_FILE_PATH "/ntpd.pid");
	while (!bb_got_signal) {
		llist_t *item;
		unsigned i, j;
		int nfds, timeout;
		double nextaction;

		/* Nothing between here and poll() blocks for any significant time */

		nextaction = G.cur_time + 3600;
		i = 0;
#if ENABLE_FEATURE_NTPD_SERVER
		if (G_listen_fd != -1) {
			pfd[0].fd = G_listen_fd;
			pfd[0].events = POLLIN;
			i++;
		}
#endif
		/* Pass over peer list, send requests, time out on receives */
		for (item = G.ntp_peers; item != NULL; item = item->link) {
			peer_t *p = (peer_t *) item->data;

			if (p->next_action_time <= G.cur_time) {
				if (p->p_fd == -1) {
					/* Time to send new req */
					if (--cnt == 0) {
						VERB4 bb_error_msg("disabling burst mode");
						G.polladj_count = 0;
						G.poll_exp = MINPOLL;
					}
					send_query_to_peer(p);
				} else {
					/* Timed out waiting for reply */
					close(p->p_fd);
					p->p_fd = -1;
					/* If poll interval is small, increase it */
					if (G.poll_exp < BIGPOLL)
						adjust_poll(MINPOLL);
					timeout = poll_interval(NOREPLY_INTERVAL);
					bb_error_msg("timed out waiting for %s, reach 0x%02x, next query in %us",
							p->p_dotted, p->reachable_bits, timeout);

					/* What if we don't see it because it changed its IP? */
					if (p->reachable_bits == 0)
						resolve_peer_hostname(p);

					set_next(p, timeout);
				}
			}

			if (p->next_action_time < nextaction)
				nextaction = p->next_action_time;

			if (p->p_fd >= 0) {
				/* Wait for reply from this peer */
				pfd[i].fd = p->p_fd;
				pfd[i].events = POLLIN;
				idx2peer[i] = p;
				i++;
			}
		}
		timeout = nextaction - G.cur_time;
		if (timeout < 0)
			timeout = 0;
		timeout++; /* (nextaction - G.cur_time) rounds down, compensating */
		/* Here we may block */
		VERB2 {
			if (i > (ENABLE_FEATURE_NTPD_SERVER && G_listen_fd != -1)) {
				/* We wait for at least one reply.
				 * Poll for it, without wasting time on a message.
				 * Since replies often come under 1 second, this also
				 * reduces clutter in logs.
				 */
				nfds = poll(pfd, i, 1000);
				if (nfds != 0)
					goto did_poll;
				if (--timeout <= 0)
					goto did_poll;
			}
			bb_error_msg("poll:%us sockets:%u interval:%us", timeout, i, 1 << G.poll_exp);
		}
		nfds = poll(pfd, i, timeout * 1000);
 did_poll:
		gettime1900d(); /* sets G.cur_time */
		if (!bb_got_signal /* poll wasn't interrupted by a signal */
		 && G.cur_time - G.last_script_run > 11*60
		) {
			/* Useful for updating battery-backed RTC and such */
			run_script("periodic", G.last_update_offset);
			gettime1900d(); /* sets G.cur_time */
		}
		/* Process any received packets */
		j = 0;
#if ENABLE_FEATURE_NTPD_SERVER
		if (G_listen_fd != -1) {
			if (pfd[0].revents /* & (POLLIN|POLLERR)*/) {
				nfds--;
				recv_and_process_client_pkt(/*G_listen_fd*/);
				gettime1900d(); /* sets G.cur_time */
			}
			j = 1;
		}
#endif
		for (; nfds != 0 && j < i; j++) {
			if (pfd[j].revents /* & (POLLIN|POLLERR)*/) {
				/*
				 * At init, alarm was set to 10 sec.
				 * Now we did get a reply.
				 * Increase timeout to 50 seconds to finish syncing.
				 */
				if (option_mask32 & OPT_qq) {
					option_mask32 &= ~OPT_qq;
					alarm(50);
				}
				nfds--;
				recv_and_process_peer_pkt(idx2peer[j]);
				gettime1900d(); /* sets G.cur_time */
			}
		}
		if (G.ntp_peers && G.stratum != MAXSTRAT) {
			for (item = G.ntp_peers; item != NULL; item = item->link) {
				peer_t *p = (peer_t *) item->data;
				if (p->reachable_bits)
					goto have_reachable_peer;
			}
			/* No peer responded for the last 8 packets, panic */
			clamp_pollexp_and_set_MAXSTRAT();
			run_script("unsync", 0.0);
 have_reachable_peer: ;
		}
	} /* while (!bb_got_signal) */
	remove_pidfile(CONFIG_PID_FILE_PATH "/ntpd.pid");
	kill_myself_with_sig(bb_got_signal);
}
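/* Illustrative sketch, not part of the original source: gettime1900d(),
 * used throughout, returns the current time as a double in the NTP
 * epoch (seconds since 1900-01-01). The conversion from the Unix epoch
 * is a fixed offset of 2208988800 seconds (70 years, including 17 leap
 * days); the function name here is hypothetical:
 */
#if 0 /* example only */
#include <sys/time.h>
static double example_gettime1900d(void)
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv.tv_sec + 2208988800.0 + tv.tv_usec * 1e-6;
}
#endif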
/*** openntpd-4.6 uses only adjtime, not adjtimex ***/

/*** ntp-4.2.6/ntpd/ntp_loopfilter.c - adjtimex usage ***/

#if 0 /* the remainder is reference material, not compiled */
static double
direct_freq(double fp_offset)
{
#ifdef KERNEL_PLL
	/*
	 * If the kernel is enabled, we need the residual offset to
	 * calculate the frequency correction.
	 */
	if (pll_control && kern_enable) {
		memset(&ntv, 0, sizeof(ntv));
		ntp_adjtime(&ntv);
#ifdef STA_NANO
		clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
		clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
		drift_comp = FREQTOD(ntv.freq);
	}
#endif /* KERNEL_PLL */
	set_freq((fp_offset - clock_offset) / (current_time - clock_epoch) + drift_comp);
	wander_resid = 0;
	return drift_comp;
}
static void
set_freq(double freq) /* frequency update */
{
	char tbuf[80];

	drift_comp = freq;
#ifdef KERNEL_PLL
	/*
	 * If the kernel is enabled, update the kernel frequency.
	 */
	if (pll_control && kern_enable) {
		memset(&ntv, 0, sizeof(ntv));
		ntv.modes = MOD_FREQUENCY;
		ntv.freq = DTOFREQ(drift_comp);
		ntp_adjtime(&ntv);
		snprintf(tbuf, sizeof(tbuf), "kernel %.3f PPM", drift_comp * 1e6);
		report_event(EVNT_FSET, NULL, tbuf);
	} else {
		snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp * 1e6);
		report_event(EVNT_FSET, NULL, tbuf);
	}
#else /* KERNEL_PLL */
	snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp * 1e6);
	report_event(EVNT_FSET, NULL, tbuf);
#endif /* KERNEL_PLL */
}
	/*
	 * This code segment works when clock adjustments are made using
	 * precision time kernel support and the ntp_adjtime() system
	 * call. This support is available in Solaris 2.6 and later,
	 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
	 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
	 * DECstation 5000/240 and Alpha AXP, additional kernel
	 * modifications provide a true microsecond clock and nanosecond
	 * clock, respectively.
	 *
	 * Important note: The kernel discipline is used only if the
	 * step threshold is less than 0.5 s, as anything higher can
	 * lead to overflow problems. This might occur if some misguided
	 * lad set the step threshold to something ridiculous.
	 */
	if (pll_control && kern_enable) {
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | MOD_STATUS | MOD_TIMECONST)
		/*
		 * We initialize the structure for the ntp_adjtime()
		 * system call. We have to convert everything to
		 * microseconds or nanoseconds first. Do not update the
		 * system variables if the ext_enable flag is set. In
		 * this case, the external clock driver will update the
		 * variables, which will be read later by the local
		 * clock driver. Afterwards, remember the time and
		 * frequency offsets for jitter and stability values and
		 * to update the frequency file.
		 */
		memset(&ntv, 0, sizeof(ntv));
		if (ext_enable) {
			ntv.modes = MOD_STATUS;
		} else {
#ifdef STA_NANO
			ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
			ntv.modes = MOD_BITS;
#endif /* STA_NANO */
			if (clock_offset < 0)
				dtemp = -.5;
			else
				dtemp = .5;
#ifdef STA_NANO
			ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
			ntv.constant = sys_poll;
#else /* STA_NANO */
			ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
			ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
			ntv.esterror = (u_int32)(clock_jitter * 1e6);
			ntv.maxerror = (u_int32)((sys_rootdelay / 2 + sys_rootdisp) * 1e6);
			ntv.status = STA_PLL;
			/*
			 * Enable/disable the PPS if requested.
			 */
			if (pps_enable) {
				if (!(pll_status & STA_PPSTIME))
					report_event(EVNT_KERN,
						NULL, "PPS enabled");
				ntv.status |= STA_PPSTIME | STA_PPSFREQ;
			} else {
				if (pll_status & STA_PPSTIME)
					report_event(EVNT_KERN,
						NULL, "PPS disabled");
				ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
			}
			if (sys_leap == LEAP_ADDSECOND)
				ntv.status |= STA_INS;
			else if (sys_leap == LEAP_DELSECOND)
				ntv.status |= STA_DEL;
		}
		/*
		 * Pass the stuff to the kernel. If it squeals, turn off
		 * the pps. In any case, fetch the kernel offset,
		 * frequency and jitter.
		 */
		if (ntp_adjtime(&ntv) == TIME_ERROR) {
			if (!(ntv.status & STA_PPSSIGNAL))
				report_event(EVNT_KERN, NULL,
					"PPS no signal");
		}
		pll_status = ntv.status;
#ifdef STA_NANO
		clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
		clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
		clock_frequency = FREQTOD(ntv.freq);
		/*
		 * If the kernel PPS is lit, monitor its performance.
		 */
		if (ntv.status & STA_PPSTIME) {
#ifdef STA_NANO
			clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
			clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
		}
#if defined(STA_NANO) && NTP_API == 4
		/*
		 * If the TAI changes, update the kernel TAI.
		 */
		if (loop_tai != sys_tai) {
			loop_tai = sys_tai;
			ntv.modes = MOD_TAI;
			ntv.constant = sys_tai;
			ntp_adjtime(&ntv);
		}
#endif /* STA_NANO */
	}
#endif /* KERNEL_PLL */
#endif
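/* Illustrative sketch, not part of the original source: on Linux the
 * same kernel PLL state can be inspected read-only via adjtimex(2)
 * with modes = 0. The kernel reports the offset in microseconds, or
 * nanoseconds when STA_NANO is set in the returned status, and the
 * frequency in ppm scaled by 2^16 (which is what FREQTOD() above
 * undoes). The function name here is hypothetical:
 */
#if 0 /* example only */
#include <stdio.h>
#include <string.h>
#include <sys/timex.h>
static void example_show_kernel_pll(void)
{
	struct timex tx;
	int state;
	double scale;

	memset(&tx, 0, sizeof(tx));
	tx.modes = 0; /* 0 = read-only, do not modify anything */
	state = adjtimex(&tx); /* returns clock state: TIME_OK, TIME_ERROR, ... */
	scale = (tx.status & STA_NANO) ? 1e9 : 1e6;
	printf("state:%d offset:%f s freq:%f ppm\n",
		state, (double)tx.offset / scale, (double)tx.freq / 65536.0);
}
#endif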